From 1c4c158327bef4f98f57ebdaf7a528845c5c29fa Mon Sep 17 00:00:00 2001 From: Alonyb Date: Mon, 10 Feb 2020 22:57:30 -0500 Subject: [PATCH 001/668] add new validations to windows workflow --- Makefile | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/Makefile b/Makefile index bfc0a8656e..0d88f76fc9 100755 --- a/Makefile +++ b/Makefile @@ -277,6 +277,18 @@ ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y) $(call DOCKER,$(BUILD_IMAGE),/usr/bin/make $@) endif which go-bindata || GO111MODULE=off GOBIN="$(GOPATH)$(DIRSEP)bin" go get github.com/jteeuwen/go-bindata/... +ifeq ($(OS),Windows_NT) + echo "%cd%" + echo "$(GOPATH)" + dir + PATH="$(PATH)$(PATHSEP)$(GOPATH)$(DIRSEP)bin" + "$(GOPATH)\bin\go-bindata.exe" -nomemcopy -o $@ -pkg assets deploy/addons/... + -gofmt -s -w $@ + @#golint: Dns should be DNS (compat sed) + @sed -i -e 's/Dns/DNS/g' $@ && rm -f ./-e + @#golint: Html should be HTML (compat sed) + @sed -i -e 's/Html/HTML/g' $@ && rm -f ./-e +else PATH="$(PATH)$(PATHSEP)$(GOPATH)$(DIRSEP)bin" go-bindata -nomemcopy -o $@ -pkg assets deploy/addons/... -gofmt -s -w $@ @#golint: Dns should be DNS (compat sed) @@ -284,15 +296,26 @@ endif @#golint: Html should be HTML (compat sed) @sed -i -e 's/Html/HTML/g' $@ && rm -f ./-e +endif + pkg/minikube/translate/translations.go: $(shell find "translations/" -type f) ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y) $(call DOCKER,$(BUILD_IMAGE),/usr/bin/make $@) endif +ifeq ($(OS),Windows_NT) + which go-bindata || GO111MODULE=off GOBIN="$(GOPATH)$(DIRSEP)bin" go get github.com/jteeuwen/go-bindata/... + PATH="$(PATH)$(PATHSEP)$(GOPATH)$(DIRSEP)bin" + "$(GOPATH)\bin\go-bindata.exe" -nomemcopy -o $@ -pkg translate translations/... + -gofmt -s -w $@ + @#golint: Json should be JSON (compat sed) + @sed -i -e 's/Json/JSON/' $@ && rm -f ./-e +else which go-bindata || GO111MODULE=off GOBIN="$(GOPATH)$(DIRSEP)bin" go get github.com/jteeuwen/go-bindata/... 
PATH="$(PATH)$(PATHSEP)$(GOPATH)$(DIRSEP)bin" go-bindata -nomemcopy -o $@ -pkg translate translations/... -gofmt -s -w $@ @#golint: Json should be JSON (compat sed) @sed -i -e 's/Json/JSON/' $@ && rm -f ./-e +endif .PHONY: cross cross: minikube-linux-amd64 minikube-linux-arm64 minikube-darwin-amd64 minikube-windows-amd64.exe ## Build minikube for all platform From bcb71106b1ec553f82dfe0500b9b68de72ddadad Mon Sep 17 00:00:00 2001 From: Alonyb Date: Mon, 10 Feb 2020 22:57:43 -0500 Subject: [PATCH 002/668] add windows base yml --- .github/workflows/main.yml | 271 ++++++++++++++++++++----------------- 1 file changed, 150 insertions(+), 121 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 2ec7b9b32c..33e54c3319 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -6,139 +6,168 @@ jobs: docker_ubuntu_16_04: runs-on: ubuntu-16.04 steps: - - uses: actions/checkout@v2 - - name: build binaries - run : | - make minikube-linux-amd64 - make e2e-linux-amd64 - mkdir -p report - - name: install gopogh - run: | - cd /tmp - GO111MODULE="on" go get github.com/medyagh/gopogh@v0.0.17 || true - cd - - - name: run integration test - run: | - mkdir -p /tmp/testhome - MINIKUBE_HOME=/tmp/testhome ./out/e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -expected-default-driver= -test.timeout=70m -test.v -binary=out/minikube-linux-amd64 2>&1 | tee ./report/testout.txt - - name: generate gopogh report - run: | - export PATH=${PATH}:`go env GOPATH`/bin - go tool test2json -t < ./report/testout.txt > ./report/testout.json || true - gopogh -in ./report/testout.json -out ./report/testout.html -name "docker ubuntu" -repo github.com/kubernetes/minikube/ || true - - uses: actions/upload-artifact@v1 - with: - name: docker_on_ubuntu_16_04_report - path: report + - uses: actions/checkout@v2 + - name: build binaries + run: | + make minikube-linux-amd64 + make e2e-linux-amd64 + mkdir -p report + - name: install gopogh + run: | + cd /tmp + 
GO111MODULE="on" go get github.com/medyagh/gopogh@v0.0.17 || true + cd - + - name: run integration test + run: | + mkdir -p /tmp/testhome + MINIKUBE_HOME=/tmp/testhome ./out/e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -expected-default-driver= -test.timeout=70m -test.v -binary=out/minikube-linux-amd64 2>&1 | tee ./report/testout.txt + - name: generate gopogh report + run: | + export PATH=${PATH}:`go env GOPATH`/bin + go tool test2json -t < ./report/testout.txt > ./report/testout.json || true + gopogh -in ./report/testout.json -out ./report/testout.html -name "docker ubuntu" -repo github.com/kubernetes/minikube/ || true + - uses: actions/upload-artifact@v1 + with: + name: docker_on_ubuntu_16_04_report + path: report docker_ubuntu_18_04: runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@v2 - - name: build binaries - run : | - make minikube-linux-amd64 - make e2e-linux-amd64 - mkdir -p report - - name: install gopogh - run: | - cd /tmp - GO111MODULE="on" go get github.com/medyagh/gopogh@v0.0.17 || true - cd - - - name: run integration test - run: | - mkdir -p /tmp/testhome - MINIKUBE_HOME=/tmp/testhome ./out/e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -expected-default-driver= -test.timeout=70m -test.v -binary=out/minikube-linux-amd64 2>&1 | tee ./report/testout.txt - - name: generate gopogh report - run: | - export PATH=${PATH}:`go env GOPATH`/bin - go tool test2json -t < ./report/testout.txt > ./report/testout.json || true - gopogh -in ./report/testout.json -out ./report/testout.html -name "docker ubuntu" -repo github.com/kubernetes/minikube/ || true - - uses: actions/upload-artifact@v1 - with: - name: docker_on_ubuntu_18_04_report - path: report + - uses: actions/checkout@v2 + - name: build binaries + run: | + make minikube-linux-amd64 + make e2e-linux-amd64 + mkdir -p report + - name: install gopogh + run: | + cd /tmp + GO111MODULE="on" go get github.com/medyagh/gopogh@v0.0.17 || true + cd - + - name: run integration test + run: | + 
mkdir -p /tmp/testhome + MINIKUBE_HOME=/tmp/testhome ./out/e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -expected-default-driver= -test.timeout=70m -test.v -binary=out/minikube-linux-amd64 2>&1 | tee ./report/testout.txt + - name: generate gopogh report + run: | + export PATH=${PATH}:`go env GOPATH`/bin + go tool test2json -t < ./report/testout.txt > ./report/testout.json || true + gopogh -in ./report/testout.json -out ./report/testout.html -name "docker ubuntu" -repo github.com/kubernetes/minikube/ || true + - uses: actions/upload-artifact@v1 + with: + name: docker_on_ubuntu_18_04_report + path: report docker_macos: runs-on: macos-latest steps: - - uses: actions/checkout@v2 - - name: build binaries - run : | - make minikube-darwin-amd64 - make e2e-darwin-amd64 - mkdir -p report - - name: install docker - run: | + - uses: actions/checkout@v2 + - name: build binaries + run: | + make minikube-darwin-amd64 + make e2e-darwin-amd64 + mkdir -p report + - name: install docker + run: | brew install docker-machine docker || true brew services start docker-machine || true docker version || true - - name: install gopogh - run: | - cd /tmp - GO111MODULE="on" go get github.com/medyagh/gopogh@v0.0.17 || true - cd - - - name: run integration test - run: | - mkdir -p /tmp/testhome - MINIKUBE_HOME=/tmp/testhome ./out/e2e-darwin-amd64 -minikube-start-args=--vm-driver=docker -expected-default-driver= -test.timeout=70m -test.v -binary=./out/minikube-darwin-amd64 2>&1 | tee ./report/testout.txt - - name: generate gopogh report - run: | - export PATH=${PATH}:`go env GOPATH`/bin - go tool test2json -t < ./report/testout.txt > ./report/testout.json || true - gopogh -in ./report/testout.json -out ./report/testout.html -name "docker macos" -repo github.com/kubernetes/minikube/ || true - - uses: actions/upload-artifact@v1 - with: - name: docker_on_macos_report - path: ./report + - name: install gopogh + run: | + cd /tmp + GO111MODULE="on" go get github.com/medyagh/gopogh@v0.0.17 
|| true + cd - + - name: run integration test + run: | + mkdir -p /tmp/testhome + MINIKUBE_HOME=/tmp/testhome ./out/e2e-darwin-amd64 -minikube-start-args=--vm-driver=docker -expected-default-driver= -test.timeout=70m -test.v -binary=./out/minikube-darwin-amd64 2>&1 | tee ./report/testout.txt + - name: generate gopogh report + run: | + export PATH=${PATH}:`go env GOPATH`/bin + go tool test2json -t < ./report/testout.txt > ./report/testout.json || true + gopogh -in ./report/testout.json -out ./report/testout.html -name "docker macos" -repo github.com/kubernetes/minikube/ || true + - uses: actions/upload-artifact@v1 + with: + name: docker_on_macos_report + path: ./report none_ubuntu16_04: runs-on: ubuntu-16.04 steps: - - uses: actions/checkout@v2 - - name: build binaries - run : | - make minikube-linux-amd64 - make e2e-linux-amd64 - mkdir -p report - - name: install gopogh - run: | - cd /tmp - GO111MODULE="on" go get github.com/medyagh/gopogh@v0.0.17 || true - cd - - - name: run integration test - run: | - mkdir -p /tmp/testhome - MINIKUBE_HOME=/tmp/testhome sudo -E ./out/e2e-linux-amd64 -minikube-start-args=--vm-driver=none -expected-default-driver= -test.timeout=70m -test.v -binary=out/minikube-linux-amd64 2>&1 | tee ./report/testout.txt - - name: generate gopogh report - run: | - export PATH=${PATH}:`go env GOPATH`/bin - go tool test2json -t < ./report/testout.txt > ./report/testout.json || true - gopogh -in ./report/testout.json -out ./report/testout.html -name "docker ubuntu" -repo github.com/kubernetes/minikube/ || true - - uses: actions/upload-artifact@v1 - with: - name: none_on_ubuntu_16_04 - path: report + - uses: actions/checkout@v2 + - name: build binaries + run: | + make minikube-linux-amd64 + make e2e-linux-amd64 + mkdir -p report + - name: install gopogh + run: | + cd /tmp + GO111MODULE="on" go get github.com/medyagh/gopogh@v0.0.17 || true + cd - + - name: run integration test + run: | + mkdir -p /tmp/testhome + MINIKUBE_HOME=/tmp/testhome sudo -E 
./out/e2e-linux-amd64 -minikube-start-args=--vm-driver=none -expected-default-driver= -test.timeout=70m -test.v -binary=out/minikube-linux-amd64 2>&1 | tee ./report/testout.txt + - name: generate gopogh report + run: | + export PATH=${PATH}:`go env GOPATH`/bin + go tool test2json -t < ./report/testout.txt > ./report/testout.json || true + gopogh -in ./report/testout.json -out ./report/testout.html -name "docker ubuntu" -repo github.com/kubernetes/minikube/ || true + - uses: actions/upload-artifact@v1 + with: + name: none_on_ubuntu_16_04 + path: report none_ubuntu_18_04: runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@v2 - - name: build binaries - run : | - make minikube-linux-amd64 - make e2e-linux-amd64 - - name: install gopogh - run: | - cd /tmp - GO111MODULE="on" go get github.com/medyagh/gopogh@v0.0.17 || true - cd - - - name: run integration test - run: | - mkdir -p /tmp/testhome - MINIKUBE_HOME=/tmp/testhome sudo -E ./out/e2e-linux-amd64 -minikube-start-args=--vm-driver=none -expected-default-driver= -test.timeout=70m -test.v -binary=out/minikube-linux-amd64 2>&1 | tee ./report/testout.txt - - name: generate gopogh report - run: | - export PATH=${PATH}:`go env GOPATH`/bin - go tool test2json -t < ./report/testout.txt > ./report/testout.json || true - gopogh -in ./report/testout.json -out ./report/testout.html -name "docker ubuntu" -repo github.com/kubernetes/minikube/ || true - - uses: actions/upload-artifact@v1 - with: - name: none_on_ubuntu_latest_report - path: report + - uses: actions/checkout@v2 + - name: build binaries + run: | + make minikube-linux-amd64 + make e2e-linux-amd64 + - name: install gopogh + run: | + cd /tmp + GO111MODULE="on" go get github.com/medyagh/gopogh@v0.0.17 || true + cd - + - name: run integration test + run: | + mkdir -p /tmp/testhome + MINIKUBE_HOME=/tmp/testhome sudo -E ./out/e2e-linux-amd64 -minikube-start-args=--vm-driver=none -expected-default-driver= -test.timeout=70m -test.v -binary=out/minikube-linux-amd64 2>&1 | 
tee ./report/testout.txt + - name: generate gopogh report + run: | + export PATH=${PATH}:`go env GOPATH`/bin + go tool test2json -t < ./report/testout.txt > ./report/testout.json || true + gopogh -in ./report/testout.json -out ./report/testout.html -name "docker ubuntu" -repo github.com/kubernetes/minikube/ || true + - uses: actions/upload-artifact@v1 + with: + name: none_on_ubuntu_latest_report + path: report + + windows_amd_64: + runs-on: windows-latest + steps: + - uses: actions/checkout@v2 + - name: build binaries + run: | + echo "::add-path::$GOPATH\bin" + make minikube-windows-amd64.exe + make e2e-windows-amd64.exe + - name: install gopogh + run: | + # go get github.com/medyagh/gopogh@v0.0.17 + # cd - + env: + GO111MODULE: on + - name: run integration test + run: | + out/e2e-windows-amd64.exe --expected-default-driver=hyperv -minikube-start-args="--vm-driver=hyperv --hyperv-virtual-switch=primary-virtual-switch" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=65m + - name: generate gopogh report + run: | + echo "{}" > ./report/testout.json + go tool test2json -t > ./report/testout.json + Get-Content ./report/testout.txt | ./report/testout.json + gopogh -in ./report/testout.json -out ./report/testout.html -name "docker ubuntu" -repo github.com/kubernetes/minikube/ + - uses: actions/upload-artifact@v1 + with: + name: none_on_ubuntu_latest_report + path: report From 5a72ca3ae7446687e2918327894e33d28f2ea396 Mon Sep 17 00:00:00 2001 From: Alonyb Date: Tue, 11 Feb 2020 23:44:32 -0500 Subject: [PATCH 003/668] add new test line --- pkg/minikube/registry/drvs/hyperv/hyperv.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/minikube/registry/drvs/hyperv/hyperv.go b/pkg/minikube/registry/drvs/hyperv/hyperv.go index 9f15d0c470..72c96f47d3 100644 --- a/pkg/minikube/registry/drvs/hyperv/hyperv.go +++ b/pkg/minikube/registry/drvs/hyperv/hyperv.go @@ -88,7 +88,8 @@ func status() registry.State { // Allow no more than 2 seconds for 
querying state ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - + + exec.CommandContext(ctx, path, "Enable-WindowsOptionalFeature", "-Online", "-FeatureName", "Microsoft-Hyper-V -All") cmd := exec.CommandContext(ctx, path, "Get-WindowsOptionalFeature", "-FeatureName", "Microsoft-Hyper-V-All", "-Online") out, err := cmd.CombinedOutput() if err != nil { From 82bb6b1aa7c278561975f43e724ef28e45ff449c Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Thu, 13 Feb 2020 22:01:10 -0500 Subject: [PATCH 004/668] Update hyperv.go --- pkg/minikube/registry/drvs/hyperv/hyperv.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/minikube/registry/drvs/hyperv/hyperv.go b/pkg/minikube/registry/drvs/hyperv/hyperv.go index 72c96f47d3..f17b9517fe 100644 --- a/pkg/minikube/registry/drvs/hyperv/hyperv.go +++ b/pkg/minikube/registry/drvs/hyperv/hyperv.go @@ -89,7 +89,6 @@ func status() registry.State { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - exec.CommandContext(ctx, path, "Enable-WindowsOptionalFeature", "-Online", "-FeatureName", "Microsoft-Hyper-V -All") cmd := exec.CommandContext(ctx, path, "Get-WindowsOptionalFeature", "-FeatureName", "Microsoft-Hyper-V-All", "-Online") out, err := cmd.CombinedOutput() if err != nil { From dbaa6a33251465d77df7dc55c6541ba05ba7aafa Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Sun, 16 Feb 2020 22:52:11 -0500 Subject: [PATCH 005/668] add binaries, build stage --- .github/workflows/main.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index c45087c0cd..5826b943d2 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -15,6 +15,8 @@ jobs: run : | make minikube-linux-amd64 make e2e-linux-amd64 + make minikube-windows-amd64.exe + make e2e-windows-amd64.exe cp -r test/integration/testdata ./out whoami echo github ref $GITHUB_REF From 85cf5154417056575687f8bcd90a670315769c4b Mon Sep 17 
00:00:00 2001 From: Ruben Baez Date: Sun, 16 Feb 2020 22:55:19 -0500 Subject: [PATCH 006/668] add windows stage --- .github/workflows/main.yml | 67 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 5826b943d2..2e3cbd6783 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -177,6 +177,73 @@ jobs: echo "--------------------------------------------" numberOfFailures=$(echo $STAT | jq '.NumberOfFail') if [ "$numberOfFailures" -gt 0 ];then echo "*** $numberOfFailures Failed ***";exit 2;fi + docker_windows_NT: + needs: [build_minikube] + env: + TIME_ELAPSED: time + JOB_NAME: "Docker_windows_NT" + COMMIT_STATUS: "" + runs-on: windows-latest + steps: + - uses: actions/checkout@v2 + - name: Install gopogh + run: | + find ~/ -iname gopogh.exe + echo $GOPATH + echo $GITHUB_WORKSPACE + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh.exe + ls + find ~/ -iname gopogh.exe + pwd + shell: bash + - name: Download binaries + uses: actions/download-artifact@v1 + with: + name: minikube_binaries + - name: run integration test + continue-on-error: true + run: | + set +euo pipefail + mkdir -p report + mkdir -p testhome + chmod a+x e2e-* + chmod a+x minikube-* + START_TIME=$(date -u +%s) + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome minikube_binaries/e2e-windows-amd64.exe --expected-default-driver=hyperv -minikube-start-args=--vm-driver=hyperv -binary=minikube_binaries/minikube-windows-amd64.exe -test.v -test.timeout=65m 2>&1 | tee ./report/testout.txt + END_TIME=$(date -u +%s) + TIME_ELAPSED=$(($END_TIME-$START_TIME)) + min=$((${TIME_ELAPSED}/60)) + sec=$((${TIME_ELAPSED}%60)) + TIME_ELAPSED="${min} min $sec seconds" + echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED} + shell: bash + - name: Generate html report + run: | + pwd + ls + echo $GITHUB_WORKSPACE + go tool test2json -t < ./report/testout.txt > 
./report/testout.json || true + STAT=$(${GITHUB_WORKSPACE}$(DIRSEP)gopogh.exe -in ./report/testout.json -out ./report/testout.html -name " $GITHUB_REF" -repo "${JOB_NAME} ${GITHUB_REF} ${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true + echo status: ${STAT} + FailNum=$(echo $STAT | jq '.NumberOfFail') + TestsNum=$(echo $STAT | jq '.NumberOfTests') + GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}" + echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT} + echo ::set-env name=STAT::${STAT} + shell: bash + - name: The End Result + run: | + echo ${GOPOGH_RESULT} + echo "--------------------------------------------" + echo $STAT | jq '.FailedTests' || true + echo "--------------------------------------------" + numberOfFailures=$(echo $STAT | jq '.NumberOfFail') + if [ "$numberOfFailures" -gt 0 ];then echo "*** $numberOfFailures Failed ***";exit 2;fi + shell: bash + - uses: actions/upload-artifact@v1 + with: + name: docker_windows_NT + path: minikube_binaries/report none_ubuntu16_04: needs: [build_minikube] env: From 566eb23ccd1ca5549bf0e055b5c2ee362dcb4786 Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Sun, 16 Feb 2020 22:56:39 -0500 Subject: [PATCH 007/668] add windows report --- .github/workflows/main.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 2e3cbd6783..335deb5aa5 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -463,6 +463,15 @@ jobs: run: | mkdir -p all_reports cp -r docker_ubuntu_18_04 ./all_reports/ + - name: download results docker_windows_NT + uses: actions/download-artifact@v1 + with: + name: docker_windows_NT + - name: cp to all_report + shell: bash + run: | + mkdir -p all_reports + cp -r docker_Windows_NT ./all_reports/ - name: download results none_ubuntu16_04 uses: actions/download-artifact@v1 with: From f784001468b3f8e8887dd5aa9b7fe3fb30b0ca26 Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: 
Sun, 16 Feb 2020 22:57:59 -0500 Subject: [PATCH 008/668] giving some love to windows workflow --- .github/workflows/main.yml | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 335deb5aa5..312058d4ef 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -186,15 +186,9 @@ jobs: runs-on: windows-latest steps: - uses: actions/checkout@v2 - - name: Install gopogh + - name: Download gopogh run: | - find ~/ -iname gopogh.exe - echo $GOPATH - echo $GITHUB_WORKSPACE curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh.exe - ls - find ~/ -iname gopogh.exe - pwd shell: bash - name: Download binaries uses: actions/download-artifact@v1 From bc619df5bf15c08d405d32358b1819ac7379247c Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Sun, 16 Feb 2020 22:59:57 -0500 Subject: [PATCH 009/668] Delete linux stuff --- .github/workflows/main.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 312058d4ef..fd85349459 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -200,8 +200,6 @@ jobs: set +euo pipefail mkdir -p report mkdir -p testhome - chmod a+x e2e-* - chmod a+x minikube-* START_TIME=$(date -u +%s) KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome minikube_binaries/e2e-windows-amd64.exe --expected-default-driver=hyperv -minikube-start-args=--vm-driver=hyperv -binary=minikube_binaries/minikube-windows-amd64.exe -test.v -test.timeout=65m 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) From f3ee969dd1343cf7960cf6d9a66f56b8807c5226 Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Sun, 16 Feb 2020 23:01:40 -0500 Subject: [PATCH 010/668] it's almost done --- .github/workflows/main.yml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index fd85349459..373f6e0f13 100644 --- 
a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -211,11 +211,8 @@ jobs: shell: bash - name: Generate html report run: | - pwd - ls - echo $GITHUB_WORKSPACE go tool test2json -t < ./report/testout.txt > ./report/testout.json || true - STAT=$(${GITHUB_WORKSPACE}$(DIRSEP)gopogh.exe -in ./report/testout.json -out ./report/testout.html -name " $GITHUB_REF" -repo "${JOB_NAME} ${GITHUB_REF} ${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true + STAT=$(${GITHUB_WORKSPACE}/gopogh.exe -in ./report/testout.json -out ./report/testout.html -name " $GITHUB_REF" -repo "${JOB_NAME} ${GITHUB_REF} ${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true echo status: ${STAT} FailNum=$(echo $STAT | jq '.NumberOfFail') TestsNum=$(echo $STAT | jq '.NumberOfTests') From bceb07159d4b04d1d5f2bb6ec6a268efeaa80ef6 Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Sun, 16 Feb 2020 23:03:24 -0500 Subject: [PATCH 011/668] perfect --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 373f6e0f13..9110594ed1 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -460,7 +460,7 @@ jobs: shell: bash run: | mkdir -p all_reports - cp -r docker_Windows_NT ./all_reports/ + cp -r docker_windows_NT ./all_reports/ - name: download results none_ubuntu16_04 uses: actions/download-artifact@v1 with: From 24fcd1521997020b323793ff4275f78a0db86ae7 Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Sun, 16 Feb 2020 23:07:16 -0500 Subject: [PATCH 012/668] giving some love to makefile --- Makefile | 3 --- 1 file changed, 3 deletions(-) diff --git a/Makefile b/Makefile index c56aa28f09..2f831e01d0 100755 --- a/Makefile +++ b/Makefile @@ -281,9 +281,6 @@ ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y) endif which go-bindata || GO111MODULE=off GOBIN="$(GOPATH)$(DIRSEP)bin" go get github.com/jteeuwen/go-bindata/... 
ifeq ($(OS),Windows_NT) - echo "%cd%" - echo "$(GOPATH)" - dir PATH="$(PATH)$(PATHSEP)$(GOPATH)$(DIRSEP)bin" "$(GOPATH)\bin\go-bindata.exe" -nomemcopy -o $@ -pkg assets deploy/addons/... -gofmt -s -w $@ From 9834408c68e23bb1f2a136b8b9dcdce7d6602179 Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Mon, 17 Feb 2020 07:43:23 -0500 Subject: [PATCH 013/668] change the order of steps --- .github/workflows/main.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 9110594ed1..86af8e60f5 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -220,6 +220,10 @@ jobs: echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT} echo ::set-env name=STAT::${STAT} shell: bash + - uses: actions/upload-artifact@v1 + with: + name: docker_windows_NT + path: minikube_binaries/report - name: The End Result run: | echo ${GOPOGH_RESULT} @@ -229,10 +233,6 @@ jobs: numberOfFailures=$(echo $STAT | jq '.NumberOfFail') if [ "$numberOfFailures" -gt 0 ];then echo "*** $numberOfFailures Failed ***";exit 2;fi shell: bash - - uses: actions/upload-artifact@v1 - with: - name: docker_windows_NT - path: minikube_binaries/report none_ubuntu16_04: needs: [build_minikube] env: From 788402e4db4a82fb3a6b003bd7880464fecc00b7 Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Mon, 17 Feb 2020 08:23:54 -0500 Subject: [PATCH 014/668] Update main.yml --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 86af8e60f5..fb26bd2596 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -223,7 +223,7 @@ jobs: - uses: actions/upload-artifact@v1 with: name: docker_windows_NT - path: minikube_binaries/report + path: report - name: The End Result run: | echo ${GOPOGH_RESULT} From a04239566948347360e3ac8373f87d0c2a0deb53 Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Wed, 19 Feb 2020 07:22:57 -0500 
Subject: [PATCH 015/668] Update Makefile --- Makefile | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/Makefile b/Makefile index 2f831e01d0..28d5bc1147 100755 --- a/Makefile +++ b/Makefile @@ -280,15 +280,6 @@ ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y) $(call DOCKER,$(BUILD_IMAGE),/usr/bin/make $@) endif which go-bindata || GO111MODULE=off GOBIN="$(GOPATH)$(DIRSEP)bin" go get github.com/jteeuwen/go-bindata/... -ifeq ($(OS),Windows_NT) - PATH="$(PATH)$(PATHSEP)$(GOPATH)$(DIRSEP)bin" - "$(GOPATH)\bin\go-bindata.exe" -nomemcopy -o $@ -pkg assets deploy/addons/... - -gofmt -s -w $@ - @#golint: Dns should be DNS (compat sed) - @sed -i -e 's/Dns/DNS/g' $@ && rm -f ./-e - @#golint: Html should be HTML (compat sed) - @sed -i -e 's/Html/HTML/g' $@ && rm -f ./-e -else PATH="$(PATH)$(PATHSEP)$(GOPATH)$(DIRSEP)bin" go-bindata -nomemcopy -o $@ -pkg assets deploy/addons/... -gofmt -s -w $@ @#golint: Dns should be DNS (compat sed) @@ -296,26 +287,15 @@ else @#golint: Html should be HTML (compat sed) @sed -i -e 's/Html/HTML/g' $@ && rm -f ./-e -endif - pkg/minikube/translate/translations.go: $(shell find "translations/" -type f) ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y) $(call DOCKER,$(BUILD_IMAGE),/usr/bin/make $@) endif -ifeq ($(OS),Windows_NT) - which go-bindata || GO111MODULE=off GOBIN="$(GOPATH)$(DIRSEP)bin" go get github.com/jteeuwen/go-bindata/... - PATH="$(PATH)$(PATHSEP)$(GOPATH)$(DIRSEP)bin" - "$(GOPATH)\bin\go-bindata.exe" -nomemcopy -o $@ -pkg translate translations/... - -gofmt -s -w $@ - @#golint: Json should be JSON (compat sed) - @sed -i -e 's/Json/JSON/' $@ && rm -f ./-e -else which go-bindata || GO111MODULE=off GOBIN="$(GOPATH)$(DIRSEP)bin" go get github.com/jteeuwen/go-bindata/... PATH="$(PATH)$(PATHSEP)$(GOPATH)$(DIRSEP)bin" go-bindata -nomemcopy -o $@ -pkg translate translations/... 
-gofmt -s -w $@ @#golint: Json should be JSON (compat sed) @sed -i -e 's/Json/JSON/' $@ && rm -f ./-e -endif .PHONY: cross cross: minikube-linux-amd64 minikube-linux-arm64 minikube-darwin-amd64 minikube-windows-amd64.exe ## Build minikube for all platform From 8536fb76419d04bd2c75b47f2599792b6cef28a0 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 19 Feb 2020 12:53:59 -0800 Subject: [PATCH 016/668] changes and things --- cmd/minikube/cmd/config/set_test.go | 2 +- cmd/minikube/cmd/logs.go | 2 +- cmd/minikube/cmd/start.go | 14 +++---- cmd/minikube/cmd/start_test.go | 6 +-- pkg/addons/addons.go | 2 +- pkg/addons/addons_test.go | 2 +- pkg/addons/config.go | 2 +- pkg/minikube/bootstrapper/bootstrapper.go | 7 ++-- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 2 +- .../bootstrapper/bsutil/kubeadm_test.go | 24 ++++++------ pkg/minikube/bootstrapper/bsutil/kubelet.go | 2 +- .../bootstrapper/bsutil/kubelet_test.go | 12 +++--- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 37 +++++++++++++++---- pkg/minikube/cluster/cluster.go | 4 +- pkg/minikube/cluster/iso.go | 2 +- pkg/minikube/config/config.go | 14 +++---- pkg/minikube/config/node.go | 2 +- pkg/minikube/config/profile.go | 6 +-- pkg/minikube/config/profile_test.go | 8 ++-- .../.minikube2/profiles/p1/config.json | 2 +- .../.minikube2/profiles/p2/config.json | 2 +- .../profiles/p5_partial_config/config.json | 2 +- .../.minikube/profiles/p1/config.json | 2 +- .../p4_partial_profile_config/config.json | 2 +- .../p5_missing_machine_config/config.json | 2 +- .../p6_empty_machine_config/config.json | 2 +- .../p7_invalid_machine_config/config.json | 2 +- .../p8_partial_machine_config/config.json | 2 +- .../.minikube/profiles/p1/config.json | 2 +- .../p4_partial_profile_config/config.json | 2 +- .../p5_missing_machine_config/config.json | 2 +- .../p6_empty_machine_config/config.json | 2 +- .../p7_invalid_machine_config/config.json | 2 +- .../p8_partial_machine_config/config.json | 2 +- pkg/minikube/config/types.go | 6 +-- 
pkg/minikube/machine/cache_images.go | 2 +- pkg/minikube/machine/cluster_test.go | 34 ++++++++--------- pkg/minikube/machine/fix.go | 2 +- pkg/minikube/machine/start.go | 10 ++--- pkg/minikube/node/config.go | 4 +- pkg/minikube/node/machine.go | 4 +- pkg/minikube/node/node.go | 13 ++++--- pkg/minikube/node/start.go | 2 +- pkg/minikube/registry/drvs/docker/docker.go | 2 +- .../registry/drvs/hyperkit/hyperkit.go | 2 +- pkg/minikube/registry/drvs/hyperv/hyperv.go | 2 +- pkg/minikube/registry/drvs/kvm2/kvm2.go | 2 +- pkg/minikube/registry/drvs/none/none.go | 2 +- .../registry/drvs/parallels/parallels.go | 2 +- pkg/minikube/registry/drvs/podman/podman.go | 2 +- .../registry/drvs/virtualbox/virtualbox.go | 2 +- pkg/minikube/registry/drvs/vmware/vmware.go | 2 +- .../drvs/vmwarefusion/vmwarefusion.go | 2 +- pkg/minikube/registry/registry.go | 2 +- pkg/minikube/tunnel/cluster_inspector.go | 4 +- pkg/minikube/tunnel/cluster_inspector_test.go | 6 +-- pkg/minikube/tunnel/test_doubles.go | 6 +-- pkg/minikube/tunnel/tunnel_test.go | 4 +- .../en/docs/Contributing/drivers.en.md | 2 +- 59 files changed, 162 insertions(+), 139 deletions(-) diff --git a/cmd/minikube/cmd/config/set_test.go b/cmd/minikube/cmd/config/set_test.go index f236445ffc..acd4db256b 100644 --- a/cmd/minikube/cmd/config/set_test.go +++ b/cmd/minikube/cmd/config/set_test.go @@ -80,7 +80,7 @@ func createTestProfile(t *testing.T) { if err := os.MkdirAll(config.ProfileFolderPath(name), 0777); err != nil { t.Fatalf("error creating temporary directory") } - if err := config.DefaultLoader.WriteConfigToFile(name, &config.MachineConfig{}); err != nil { + if err := config.DefaultLoader.WriteConfigToFile(name, &config.ClusterConfig{}); err != nil { t.Fatalf("error creating temporary profile config: %v", err) } } diff --git a/cmd/minikube/cmd/logs.go b/cmd/minikube/cmd/logs.go index 4796ef54b4..934ca9c1b0 100644 --- a/cmd/minikube/cmd/logs.go +++ b/cmd/minikube/cmd/logs.go @@ -67,7 +67,7 @@ var logsCmd = &cobra.Command{ if 
err != nil { exit.WithError("command runner", err) } - bs, err := cluster.Bootstrapper(api, viper.GetString(cmdcfg.Bootstrapper)) + bs, err := cluster.Bootstrapper(api, viper.GetString(cmdcfg.Bootstrapper), viper.GetString(config.MachineProfile)) if err != nil { exit.WithError("Error getting cluster bootstrapper", err) } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index de5002a4c9..72d83bd549 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -351,7 +351,7 @@ func updateDriver(driverName string) { } } -func cacheISO(cfg *config.MachineConfig, driverName string) { +func cacheISO(cfg *config.ClusterConfig, driverName string) { if !driver.BareMetal(driverName) && !driver.IsKIC(driverName) { if err := cluster.CacheISO(*cfg); err != nil { exit.WithError("Failed to cache ISO", err) @@ -429,7 +429,7 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st return nil } -func selectDriver(existing *config.MachineConfig) registry.DriverState { +func selectDriver(existing *config.ClusterConfig) registry.DriverState { // Technically unrelated, but important to perform before detection driver.SetLibvirtURI(viper.GetString(kvmQemuURI)) @@ -464,7 +464,7 @@ func selectDriver(existing *config.MachineConfig) registry.DriverState { } // validateDriver validates that the selected driver appears sane, exits if not -func validateDriver(ds registry.DriverState, existing *config.MachineConfig) { +func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) { name := ds.Name glog.Infof("validating driver %q against %+v", name, existing) if !driver.Supported(name) { @@ -717,10 +717,10 @@ func validateRegistryMirror() { } // generateCfgFromFlags generates config.Config based on flags and supplied arguments -func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) (config.MachineConfig, config.Node, error) { +func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName 
string) (config.ClusterConfig, config.Node, error) { r, err := cruntime.New(cruntime.Config{Type: viper.GetString(containerRuntime)}) if err != nil { - return config.MachineConfig{}, config.Node{}, err + return config.ClusterConfig{}, config.Node{}, err } // Pick good default values for --network-plugin and --enable-default-cni based on runtime. @@ -775,7 +775,7 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) Worker: true, } - cfg := config.MachineConfig{ + cfg := config.ClusterConfig{ Name: viper.GetString(config.MachineProfile), KeepContext: viper.GetBool(keepContext), EmbedCerts: viper.GetBool(embedCerts), @@ -881,7 +881,7 @@ func autoSetDriverOptions(cmd *cobra.Command, drvName string) (err error) { } // getKubernetesVersion ensures that the requested version is reasonable -func getKubernetesVersion(old *config.MachineConfig) string { +func getKubernetesVersion(old *config.ClusterConfig) string { paramVersion := viper.GetString(kubernetesVersion) if paramVersion == "" { // if the user did not specify any version then ... 
diff --git a/cmd/minikube/cmd/start_test.go b/cmd/minikube/cmd/start_test.go index 1003292da2..b664e3486b 100644 --- a/cmd/minikube/cmd/start_test.go +++ b/cmd/minikube/cmd/start_test.go @@ -31,7 +31,7 @@ func TestGetKuberneterVersion(t *testing.T) { description string expectedVersion string paramVersion string - cfg *cfg.MachineConfig + cfg *cfg.ClusterConfig }{ { description: "kubernetes-version not given, no config", @@ -42,7 +42,7 @@ func TestGetKuberneterVersion(t *testing.T) { description: "kubernetes-version not given, config available", expectedVersion: "v1.15.0", paramVersion: "", - cfg: &cfg.MachineConfig{KubernetesConfig: cfg.KubernetesConfig{KubernetesVersion: "v1.15.0"}}, + cfg: &cfg.ClusterConfig{KubernetesConfig: cfg.KubernetesConfig{KubernetesVersion: "v1.15.0"}}, }, { description: "kubernetes-version given, no config", @@ -53,7 +53,7 @@ func TestGetKuberneterVersion(t *testing.T) { description: "kubernetes-version given, config available", expectedVersion: "v1.16.0", paramVersion: "v1.16.0", - cfg: &cfg.MachineConfig{KubernetesConfig: cfg.KubernetesConfig{KubernetesVersion: "v1.15.0"}}, + cfg: &cfg.ClusterConfig{KubernetesConfig: cfg.KubernetesConfig{KubernetesVersion: "v1.15.0"}}, }, } diff --git a/pkg/addons/addons.go b/pkg/addons/addons.go index c1042e45be..e1ca836c37 100644 --- a/pkg/addons/addons.go +++ b/pkg/addons/addons.go @@ -88,7 +88,7 @@ func run(name, value, profile string, fns []setFn) error { } // SetBool sets a bool value -func SetBool(m *config.MachineConfig, name string, val string) error { +func SetBool(m *config.ClusterConfig, name string, val string) error { b, err := strconv.ParseBool(val) if err != nil { return err diff --git a/pkg/addons/addons_test.go b/pkg/addons/addons_test.go index 14449917a2..559f5729e7 100644 --- a/pkg/addons/addons_test.go +++ b/pkg/addons/addons_test.go @@ -44,7 +44,7 @@ func createTestProfile(t *testing.T) string { if err := os.MkdirAll(config.ProfileFolderPath(name), 0777); err != nil { 
t.Fatalf("error creating temporary directory") } - if err := config.DefaultLoader.WriteConfigToFile(name, &config.MachineConfig{}); err != nil { + if err := config.DefaultLoader.WriteConfigToFile(name, &config.ClusterConfig{}); err != nil { t.Fatalf("error creating temporary profile config: %v", err) } return name diff --git a/pkg/addons/config.go b/pkg/addons/config.go index 059ddf7929..46c713d69f 100644 --- a/pkg/addons/config.go +++ b/pkg/addons/config.go @@ -23,7 +23,7 @@ type setFn func(string, string, string) error // Addon represents an addon type Addon struct { name string - set func(*config.MachineConfig, string, string) error + set func(*config.ClusterConfig, string, string) error validations []setFn callbacks []setFn } diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index 669a38fd57..eba5167179 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -35,10 +35,11 @@ type LogOptions struct { // Bootstrapper contains all the methods needed to bootstrap a kubernetes cluster type Bootstrapper interface { - StartCluster(config.MachineConfig) error - UpdateCluster(config.MachineConfig) error + StartCluster(config.ClusterConfig) error + UpdateCluster(config.ClusterConfig) error DeleteCluster(config.KubernetesConfig) error - WaitForCluster(config.MachineConfig, time.Duration) error + WaitForCluster(config.ClusterConfig, time.Duration) error + JoinCluster(config.ClusterConfig, config.Node, string) error // LogCommands returns a map of log type to a command which will display that log. 
LogCommands(LogOptions) map[string]string SetupCerts(config.KubernetesConfig, config.Node) error diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 602c9c17de..e5d926ee12 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -36,7 +36,7 @@ import ( const remoteContainerRuntime = "remote" // GenerateKubeadmYAML generates the kubeadm.yaml file -func GenerateKubeadmYAML(mc config.MachineConfig, r cruntime.Manager) ([]byte, error) { +func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager) ([]byte, error) { k8s := mc.KubernetesConfig version, err := ParseKubernetesVersion(k8s.KubernetesVersion) if err != nil { diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go index 6bf8f27951..922bf4b8b3 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go @@ -106,9 +106,9 @@ func TestGenerateKubeadmYAMLDNS(t *testing.T) { name string runtime string shouldErr bool - cfg config.MachineConfig + cfg config.ClusterConfig }{ - {"dns", "docker", false, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{DNSDomain: "1.1.1.1"}}}, + {"dns", "docker", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{DNSDomain: "1.1.1.1"}}}, } for _, version := range versions { for _, tc := range tests { @@ -172,17 +172,17 @@ func TestGenerateKubeadmYAML(t *testing.T) { name string runtime string shouldErr bool - cfg config.MachineConfig + cfg config.ClusterConfig }{ - {"default", "docker", false, config.MachineConfig{}}, - {"containerd", "containerd", false, config.MachineConfig{}}, - {"crio", "crio", false, config.MachineConfig{}}, - {"options", "docker", false, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts}}}, - {"crio-options-gates", "crio", false, config.MachineConfig{KubernetesConfig: 
config.KubernetesConfig{ExtraOptions: extraOpts, FeatureGates: "a=b"}}}, - {"unknown-component", "docker", true, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: config.ExtraOptionSlice{config.ExtraOption{Component: "not-a-real-component", Key: "killswitch", Value: "true"}}}}}, - {"containerd-api-port", "containerd", false, config.MachineConfig{Nodes: []config.Node{{Port: 12345}}}}, - {"containerd-pod-network-cidr", "containerd", false, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOptsPodCidr}}}, - {"image-repository", "docker", false, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{ImageRepository: "test/repo"}}}, + {"default", "docker", false, config.ClusterConfig{}}, + {"containerd", "containerd", false, config.ClusterConfig{}}, + {"crio", "crio", false, config.ClusterConfig{}}, + {"options", "docker", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts}}}, + {"crio-options-gates", "crio", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts, FeatureGates: "a=b"}}}, + {"unknown-component", "docker", true, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: config.ExtraOptionSlice{config.ExtraOption{Component: "not-a-real-component", Key: "killswitch", Value: "true"}}}}}, + {"containerd-api-port", "containerd", false, config.ClusterConfig{Nodes: []config.Node{{Port: 12345}}}}, + {"containerd-pod-network-cidr", "containerd", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOptsPodCidr}}}, + {"image-repository", "docker", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ImageRepository: "test/repo"}}}, } for _, version := range versions { for _, tc := range tests { diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet.go b/pkg/minikube/bootstrapper/bsutil/kubelet.go index f080a7eba7..a426cc409b 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/kubelet.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet.go @@ -30,7 +30,7 @@ import ( // NewKubeletConfig generates a new systemd unit containing a configured kubelet // based on the options present in the KubernetesConfig. -func NewKubeletConfig(mc config.MachineConfig, nc config.Node, r cruntime.Manager) ([]byte, error) { +func NewKubeletConfig(mc config.ClusterConfig, nc config.Node, r cruntime.Manager) ([]byte, error) { k8s := mc.KubernetesConfig version, err := ParseKubernetesVersion(k8s.KubernetesVersion) if err != nil { diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go index 56dc23168b..b0908e870f 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go @@ -30,13 +30,13 @@ import ( func TestGenerateKubeletConfig(t *testing.T) { tests := []struct { description string - cfg config.MachineConfig + cfg config.ClusterConfig expected string shouldErr bool }{ { description: "old docker", - cfg: config.MachineConfig{ + cfg: config.ClusterConfig{ KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.OldestKubernetesVersion, ContainerRuntime: "docker", @@ -61,7 +61,7 @@ ExecStart=/var/lib/minikube/binaries/v1.11.10/kubelet --allow-privileged=true -- }, { description: "newest cri runtime", - cfg: config.MachineConfig{ + cfg: config.ClusterConfig{ KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.NewestKubernetesVersion, ContainerRuntime: "cri-o", @@ -86,7 +86,7 @@ ExecStart=/var/lib/minikube/binaries/v1.17.2/kubelet --authorization-mode=Webhoo }, { description: "default containerd runtime", - cfg: config.MachineConfig{ + cfg: config.ClusterConfig{ KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.DefaultKubernetesVersion, ContainerRuntime: "containerd", @@ -111,7 +111,7 @@ ExecStart=/var/lib/minikube/binaries/v1.17.2/kubelet 
--authorization-mode=Webhoo }, { description: "default containerd runtime with IP override", - cfg: config.MachineConfig{ + cfg: config.ClusterConfig{ KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.DefaultKubernetesVersion, ContainerRuntime: "containerd", @@ -143,7 +143,7 @@ ExecStart=/var/lib/minikube/binaries/v1.17.2/kubelet --authorization-mode=Webhoo }, { description: "docker with custom image repository", - cfg: config.MachineConfig{ + cfg: config.ClusterConfig{ KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.DefaultKubernetesVersion, ContainerRuntime: "docker", diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 9eb428d8fa..a136a9f8de 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -35,7 +35,6 @@ import ( "github.com/docker/machine/libmachine/state" "github.com/golang/glog" "github.com/pkg/errors" - "github.com/spf13/viper" "k8s.io/client-go/kubernetes" kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/minikube/pkg/drivers/kic" @@ -64,8 +63,7 @@ type Bootstrapper struct { } // NewBootstrapper creates a new kubeadm.Bootstrapper -func NewBootstrapper(api libmachine.API) (*Bootstrapper, error) { - name := viper.GetString(config.MachineProfile) +func NewBootstrapper(api libmachine.API, name string) (*Bootstrapper, error) { h, err := api.Load(name) if err != nil { return nil, errors.Wrap(err, "getting api client") @@ -149,7 +147,7 @@ func (k *Bootstrapper) createCompatSymlinks() error { } // StartCluster starts the cluster -func (k *Bootstrapper) StartCluster(cfg config.MachineConfig) error { +func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { err := bsutil.ExistingConfig(k.c) if err == nil { // if there is an existing cluster don't reconfigure it return k.restartCluster(cfg) @@ -262,7 +260,7 @@ func (k *Bootstrapper) client(ip string, port int) 
(*kubernetes.Clientset, error } // WaitForCluster blocks until the cluster appears to be healthy -func (k *Bootstrapper) WaitForCluster(cfg config.MachineConfig, timeout time.Duration) error { +func (k *Bootstrapper) WaitForCluster(cfg config.ClusterConfig, timeout time.Duration) error { start := time.Now() out.T(out.Waiting, "Waiting for cluster to come online ...") cp, err := config.PrimaryControlPlane(cfg) @@ -295,7 +293,7 @@ func (k *Bootstrapper) WaitForCluster(cfg config.MachineConfig, timeout time.Dur } // restartCluster restarts the Kubernetes cluster configured by kubeadm -func (k *Bootstrapper) restartCluster(cfg config.MachineConfig) error { +func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { glog.Infof("restartCluster start") start := time.Now() @@ -371,6 +369,29 @@ func (k *Bootstrapper) restartCluster(cfg config.MachineConfig) error { return nil } +// JoinCluster adds a node to an existing cluster +func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinCmd string) error { + start := time.Now() + glog.Infof("JoinCluster: %+v", cc) + defer func() { + glog.Infof("JoinCluster complete in %s", time.Since(start)) + }() + + // Join the master by specifying its token + joinCmd = fmt.Sprintf("%s --v=10 --node-name=%s", joinCmd, n.Name) + fmt.Println(joinCmd) + out, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", joinCmd)) + if err != nil { + return errors.Wrapf(err, "cmd failed: %s\n%s\n", joinCmd, out) + } + + if _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", "sudo systemctl daemon-reload && sudo systemctl enable kubelet && sudo systemctl start kubelet")); err != nil { + return errors.Wrap(err, "starting kubelet") + } + + return nil +} + // DeleteCluster removes the components that were started earlier func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error { version, err := bsutil.ParseKubernetesVersion(k8s.KubernetesVersion) @@ -396,7 +417,7 @@ func (k *Bootstrapper) SetupCerts(k8s 
config.KubernetesConfig, n config.Node) er } // UpdateCluster updates the cluster -func (k *Bootstrapper) UpdateCluster(cfg config.MachineConfig) error { +func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { images, err := images.Kubeadm(cfg.KubernetesConfig.ImageRepository, cfg.KubernetesConfig.KubernetesVersion) if err != nil { return errors.Wrap(err, "kubeadm images") @@ -469,7 +490,7 @@ func (k *Bootstrapper) UpdateCluster(cfg config.MachineConfig) error { } // applyKicOverlay applies the CNI plugin needed to make kic work -func (k *Bootstrapper) applyKicOverlay(cfg config.MachineConfig) error { +func (k *Bootstrapper) applyKicOverlay(cfg config.ClusterConfig) error { cmd := exec.Command("sudo", path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl"), "create", fmt.Sprintf("--kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "kubeconfig")), "-f", "-") diff --git a/pkg/minikube/cluster/cluster.go b/pkg/minikube/cluster/cluster.go index 481bbe3cc3..a2b9e06613 100644 --- a/pkg/minikube/cluster/cluster.go +++ b/pkg/minikube/cluster/cluster.go @@ -42,12 +42,12 @@ func init() { } // Bootstrapper returns a new bootstrapper for the cluster -func Bootstrapper(api libmachine.API, bootstrapperName string) (bootstrapper.Bootstrapper, error) { +func Bootstrapper(api libmachine.API, bootstrapperName string, machineName string) (bootstrapper.Bootstrapper, error) { var b bootstrapper.Bootstrapper var err error switch bootstrapperName { case bootstrapper.Kubeadm: - b, err = kubeadm.NewBootstrapper(api) + b, err = kubeadm.NewBootstrapper(api, machineName) if err != nil { return nil, errors.Wrap(err, "getting a new kubeadm bootstrapper") } diff --git a/pkg/minikube/cluster/iso.go b/pkg/minikube/cluster/iso.go index 253acbd370..15c06b4a37 100644 --- a/pkg/minikube/cluster/iso.go +++ b/pkg/minikube/cluster/iso.go @@ -22,7 +22,7 @@ import ( ) // CacheISO downloads and caches ISO. 
-func CacheISO(cfg config.MachineConfig) error { +func CacheISO(cfg config.ClusterConfig) error { if driver.BareMetal(cfg.Driver) { return nil } diff --git a/pkg/minikube/config/config.go b/pkg/minikube/config/config.go index 89e2852017..0f031716dc 100644 --- a/pkg/minikube/config/config.go +++ b/pkg/minikube/config/config.go @@ -141,19 +141,19 @@ func encode(w io.Writer, m MinikubeConfig) error { } // Load loads the kubernetes and machine config for the current machine -func Load(profile string) (*MachineConfig, error) { +func Load(profile string) (*ClusterConfig, error) { return DefaultLoader.LoadConfigFromFile(profile) } // Write writes the kubernetes and machine config for the current machine -func Write(profile string, cc *MachineConfig) error { +func Write(profile string, cc *ClusterConfig) error { return DefaultLoader.WriteConfigToFile(profile, cc) } // Loader loads the kubernetes and machine config based on the machine profile name type Loader interface { - LoadConfigFromFile(profile string, miniHome ...string) (*MachineConfig, error) - WriteConfigToFile(profileName string, cc *MachineConfig, miniHome ...string) error + LoadConfigFromFile(profile string, miniHome ...string) (*ClusterConfig, error) + WriteConfigToFile(profileName string, cc *ClusterConfig, miniHome ...string) error } type simpleConfigLoader struct{} @@ -161,8 +161,8 @@ type simpleConfigLoader struct{} // DefaultLoader is the default config loader var DefaultLoader Loader = &simpleConfigLoader{} -func (c *simpleConfigLoader) LoadConfigFromFile(profileName string, miniHome ...string) (*MachineConfig, error) { - var cc MachineConfig +func (c *simpleConfigLoader) LoadConfigFromFile(profileName string, miniHome ...string) (*ClusterConfig, error) { + var cc ClusterConfig // Move to profile package path := profileFilePath(profileName, miniHome...) @@ -184,7 +184,7 @@ func (c *simpleConfigLoader) LoadConfigFromFile(profileName string, miniHome ... 
return &cc, nil } -func (c *simpleConfigLoader) WriteConfigToFile(profileName string, cc *MachineConfig, miniHome ...string) error { +func (c *simpleConfigLoader) WriteConfigToFile(profileName string, cc *ClusterConfig, miniHome ...string) error { // Move to profile package path := profileFilePath(profileName, miniHome...) contents, err := json.MarshalIndent(cc, "", " ") diff --git a/pkg/minikube/config/node.go b/pkg/minikube/config/node.go index 219acb5e46..1c6f050159 100644 --- a/pkg/minikube/config/node.go +++ b/pkg/minikube/config/node.go @@ -17,7 +17,7 @@ limitations under the License. package config // AddNode adds a new node config to an existing cluster. -func AddNode(cc *MachineConfig, name string, controlPlane bool, k8sVersion string, profileName string) error { +func AddNode(cc *ClusterConfig, name string, controlPlane bool, k8sVersion string, profileName string) error { node := Node{ Name: name, Worker: true, diff --git a/pkg/minikube/config/profile.go b/pkg/minikube/config/profile.go index c37eabac4e..5ad8572e13 100644 --- a/pkg/minikube/config/profile.go +++ b/pkg/minikube/config/profile.go @@ -52,7 +52,7 @@ func (p *Profile) IsValid() bool { } // PrimaryControlPlane gets the node specific config for the first created control plane -func PrimaryControlPlane(cc MachineConfig) (Node, error) { +func PrimaryControlPlane(cc ClusterConfig) (Node, error) { for _, n := range cc.Nodes { if n.ControlPlane { return n, nil @@ -86,12 +86,12 @@ func ProfileExists(name string, miniHome ...string) bool { // CreateEmptyProfile creates an empty profile and stores in $MINIKUBE_HOME/profiles//config.json func CreateEmptyProfile(name string, miniHome ...string) error { - cfg := &MachineConfig{} + cfg := &ClusterConfig{} return SaveProfile(name, cfg, miniHome...) 
} // SaveProfile creates an profile out of the cfg and stores in $MINIKUBE_HOME/profiles//config.json -func SaveProfile(name string, cfg *MachineConfig, miniHome ...string) error { +func SaveProfile(name string, cfg *ClusterConfig, miniHome ...string) error { data, err := json.MarshalIndent(cfg, "", " ") if err != nil { return err diff --git a/pkg/minikube/config/profile_test.go b/pkg/minikube/config/profile_test.go index aecb4c2f81..06903d1808 100644 --- a/pkg/minikube/config/profile_test.go +++ b/pkg/minikube/config/profile_test.go @@ -164,13 +164,13 @@ func TestCreateProfile(t *testing.T) { var testCases = []struct { name string - cfg *MachineConfig + cfg *ClusterConfig expectErr bool }{ - {"p_empty_config", &MachineConfig{}, false}, - {"p_partial_config", &MachineConfig{KubernetesConfig: KubernetesConfig{ + {"p_empty_config", &ClusterConfig{}, false}, + {"p_partial_config", &ClusterConfig{KubernetesConfig: KubernetesConfig{ ShouldLoadCachedImages: false}}, false}, - {"p_partial_config2", &MachineConfig{ + {"p_partial_config2", &ClusterConfig{ KeepContext: false, KubernetesConfig: KubernetesConfig{ ShouldLoadCachedImages: false}}, false}, } diff --git a/pkg/minikube/config/testdata/.minikube2/profiles/p1/config.json b/pkg/minikube/config/testdata/.minikube2/profiles/p1/config.json index 766e9c04c1..c4214bf442 100644 --- a/pkg/minikube/config/testdata/.minikube2/profiles/p1/config.json +++ b/pkg/minikube/config/testdata/.minikube2/profiles/p1/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/.minikube2/profiles/p2/config.json b/pkg/minikube/config/testdata/.minikube2/profiles/p2/config.json index 99e4b167a5..ab35410474 100644 --- a/pkg/minikube/config/testdata/.minikube2/profiles/p2/config.json +++ b/pkg/minikube/config/testdata/.minikube2/profiles/p2/config.json @@ -1,5 +1,5 @@ { - 
"MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/.minikube2/profiles/p5_partial_config/config.json b/pkg/minikube/config/testdata/.minikube2/profiles/p5_partial_config/config.json index 29f62c0149..a99c56efe8 100644 --- a/pkg/minikube/config/testdata/.minikube2/profiles/p5_partial_config/config.json +++ b/pkg/minikube/config/testdata/.minikube2/profiles/p5_partial_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p1/config.json b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p1/config.json index b0e1b57105..6c826ebfc6 100644 --- a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p1/config.json +++ b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p1/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p4_partial_profile_config/config.json b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p4_partial_profile_config/config.json index 29f62c0149..a99c56efe8 100644 --- a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p4_partial_profile_config/config.json +++ b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p4_partial_profile_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p5_missing_machine_config/config.json 
b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p5_missing_machine_config/config.json index c1cf21b26f..6680e4b784 100644 --- a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p5_missing_machine_config/config.json +++ b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p5_missing_machine_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p6_empty_machine_config/config.json b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p6_empty_machine_config/config.json index 667cbd7652..2bab758640 100644 --- a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p6_empty_machine_config/config.json +++ b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p6_empty_machine_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p7_invalid_machine_config/config.json b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p7_invalid_machine_config/config.json index 7cbd2e409f..d56f53688d 100644 --- a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p7_invalid_machine_config/config.json +++ b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p7_invalid_machine_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p8_partial_machine_config/config.json b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p8_partial_machine_config/config.json index 
855b31af90..26324fc366 100644 --- a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p8_partial_machine_config/config.json +++ b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p8_partial_machine_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p1/config.json b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p1/config.json index b0e1b57105..6c826ebfc6 100644 --- a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p1/config.json +++ b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p1/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p4_partial_profile_config/config.json b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p4_partial_profile_config/config.json index 29f62c0149..a99c56efe8 100644 --- a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p4_partial_profile_config/config.json +++ b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p4_partial_profile_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p5_missing_machine_config/config.json b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p5_missing_machine_config/config.json index c1cf21b26f..6680e4b784 100644 --- a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p5_missing_machine_config/config.json +++ 
b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p5_missing_machine_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p6_empty_machine_config/config.json b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p6_empty_machine_config/config.json index 667cbd7652..2bab758640 100644 --- a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p6_empty_machine_config/config.json +++ b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p6_empty_machine_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p7_invalid_machine_config/config.json b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p7_invalid_machine_config/config.json index 7cbd2e409f..d56f53688d 100644 --- a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p7_invalid_machine_config/config.json +++ b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p7_invalid_machine_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p8_partial_machine_config/config.json b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p8_partial_machine_config/config.json index 855b31af90..26324fc366 100644 --- a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p8_partial_machine_config/config.json +++ 
b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p8_partial_machine_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/types.go b/pkg/minikube/config/types.go index 6513efd2d9..868ad8842e 100644 --- a/pkg/minikube/config/types.go +++ b/pkg/minikube/config/types.go @@ -27,11 +27,11 @@ import ( type Profile struct { Name string Status string // running, stopped - Config *MachineConfig + Config *ClusterConfig } -// MachineConfig contains the parameters used to start a cluster. -type MachineConfig struct { +// ClusterConfig contains the parameters used to start a cluster. +type ClusterConfig struct { Name string KeepContext bool // used by start and profile command to or not to switch kubectl's current context EmbedCerts bool // used by kubeconfig.Setup diff --git a/pkg/minikube/machine/cache_images.go b/pkg/minikube/machine/cache_images.go index 29d61b244f..301c3b02fd 100644 --- a/pkg/minikube/machine/cache_images.go +++ b/pkg/minikube/machine/cache_images.go @@ -61,7 +61,7 @@ func CacheImagesForBootstrapper(imageRepository string, version string, clusterB } // LoadImages loads previously cached images into the container runtime -func LoadImages(cc *config.MachineConfig, runner command.Runner, images []string, cacheDir string) error { +func LoadImages(cc *config.ClusterConfig, runner command.Runner, images []string, cacheDir string) error { glog.Infof("LoadImages start: %s", images) start := time.Now() diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index bf38656062..3b84c58842 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -41,7 +41,7 @@ type MockDownloader struct{} func (d MockDownloader) GetISOFileURI(isoURL string) string { return "" } func (d MockDownloader) 
CacheMinikubeISOFromURL(isoURL string) error { return nil } -func createMockDriverHost(c config.MachineConfig) (interface{}, error) { +func createMockDriverHost(c config.ClusterConfig) (interface{}, error) { return nil, nil } @@ -60,7 +60,7 @@ func RegisterMockDriver(t *testing.T) { } } -var defaultMachineConfig = config.MachineConfig{ +var defaultClusterConfig = config.ClusterConfig{ Driver: driver.Mock, MinikubeISO: constants.DefaultISOURL, Downloader: MockDownloader{}, @@ -76,7 +76,7 @@ func TestCreateHost(t *testing.T) { t.Fatal("Machine already exists.") } - _, err := createHost(api, defaultMachineConfig) + _, err := createHost(api, defaultClusterConfig) if err != nil { t.Fatalf("Error creating host: %v", err) } @@ -114,7 +114,7 @@ func TestStartHostExists(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) // Create an initial host. - ih, err := createHost(api, defaultMachineConfig) + ih, err := createHost(api, defaultClusterConfig) if err != nil { t.Fatalf("Error creating host: %v", err) } @@ -128,7 +128,7 @@ func TestStartHostExists(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - mc := defaultMachineConfig + mc := defaultClusterConfig mc.Name = ih.Name // This should pass without calling Create because the host exists already. h, err := StartHost(api, mc) @@ -151,7 +151,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) { api := tests.NewMockAPI(t) // Create an incomplete host with machine does not exist error(i.e. 
User Interrupt Cancel) api.NotExistError = true - h, err := createHost(api, defaultMachineConfig) + h, err := createHost(api, defaultClusterConfig) if err != nil { t.Fatalf("Error creating host: %v", err) } @@ -159,7 +159,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - mc := defaultMachineConfig + mc := defaultClusterConfig mc.Name = h.Name // This should pass with creating host, while machine does not exist. @@ -193,7 +193,7 @@ func TestStartStoppedHost(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) // Create an initial host. - h, err := createHost(api, defaultMachineConfig) + h, err := createHost(api, defaultClusterConfig) if err != nil { t.Fatalf("Error creating host: %v", err) } @@ -203,7 +203,7 @@ func TestStartStoppedHost(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - mc := defaultMachineConfig + mc := defaultClusterConfig mc.Name = h.Name h, err = StartHost(api, mc) if err != nil { @@ -233,7 +233,7 @@ func TestStartHost(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - h, err := StartHost(api, defaultMachineConfig) + h, err := StartHost(api, defaultClusterConfig) if err != nil { t.Fatal("Error starting host.") } @@ -261,7 +261,7 @@ func TestStartHostConfig(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - config := config.MachineConfig{ + config := config.ClusterConfig{ Driver: driver.Mock, DockerEnv: []string{"FOO=BAR"}, DockerOpt: []string{"param=value"}, @@ -298,7 +298,7 @@ func TestStopHostError(t *testing.T) { func TestStopHost(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - h, err := createHost(api, defaultMachineConfig) + h, err := createHost(api, defaultClusterConfig) if err != nil { t.Errorf("createHost failed: %v", err) } @@ -314,7 
+314,7 @@ func TestStopHost(t *testing.T) { func TestDeleteHost(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - if _, err := createHost(api, defaultMachineConfig); err != nil { + if _, err := createHost(api, defaultClusterConfig); err != nil { t.Errorf("createHost failed: %v", err) } @@ -326,7 +326,7 @@ func TestDeleteHost(t *testing.T) { func TestDeleteHostErrorDeletingVM(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - h, err := createHost(api, defaultMachineConfig) + h, err := createHost(api, defaultClusterConfig) if err != nil { t.Errorf("createHost failed: %v", err) } @@ -343,7 +343,7 @@ func TestDeleteHostErrorDeletingFiles(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) api.RemoveError = true - if _, err := createHost(api, defaultMachineConfig); err != nil { + if _, err := createHost(api, defaultClusterConfig); err != nil { t.Errorf("createHost failed: %v", err) } @@ -357,7 +357,7 @@ func TestDeleteHostErrMachineNotExist(t *testing.T) { api := tests.NewMockAPI(t) // Create an incomplete host with machine does not exist error(i.e. 
User Interrupt Cancel) api.NotExistError = true - _, err := createHost(api, defaultMachineConfig) + _, err := createHost(api, defaultClusterConfig) if err != nil { t.Errorf("createHost failed: %v", err) } @@ -383,7 +383,7 @@ func TestGetHostStatus(t *testing.T) { checkState(state.None.String()) - if _, err := createHost(api, defaultMachineConfig); err != nil { + if _, err := createHost(api, defaultClusterConfig); err != nil { t.Errorf("createHost failed: %v", err) } diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 2a69a37472..86ec7e2d70 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -54,7 +54,7 @@ var ( ) // fixHost fixes up a previously configured VM so that it is ready to run Kubernetes -func fixHost(api libmachine.API, mc config.MachineConfig) (*host.Host, error) { +func fixHost(api libmachine.API, mc config.ClusterConfig) (*host.Host, error) { out.T(out.Waiting, "Reconfiguring existing host ...") start := time.Now() diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go index 33687dc151..b043f58051 100644 --- a/pkg/minikube/machine/start.go +++ b/pkg/minikube/machine/start.go @@ -61,7 +61,7 @@ var ( ) // StartHost starts a host VM. 
-func StartHost(api libmachine.API, cfg config.MachineConfig) (*host.Host, error) { +func StartHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error) { // Prevent machine-driver boot races, as well as our own certificate race releaser, err := acquireMachinesLock(cfg.Name) if err != nil { @@ -85,7 +85,7 @@ func StartHost(api libmachine.API, cfg config.MachineConfig) (*host.Host, error) return fixHost(api, cfg) } -func engineOptions(cfg config.MachineConfig) *engine.Options { +func engineOptions(cfg config.ClusterConfig) *engine.Options { o := engine.Options{ Env: cfg.DockerEnv, InsecureRegistry: append([]string{constants.DefaultServiceCIDR}, cfg.InsecureRegistry...), @@ -96,7 +96,7 @@ func engineOptions(cfg config.MachineConfig) *engine.Options { return &o } -func createHost(api libmachine.API, cfg config.MachineConfig) (*host.Host, error) { +func createHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error) { glog.Infof("createHost starting for %q (driver=%q)", cfg.Name, cfg.Driver) start := time.Now() defer func() { @@ -152,7 +152,7 @@ func createHost(api libmachine.API, cfg config.MachineConfig) (*host.Host, error } // postStart are functions shared between startHost and fixHost -func postStartSetup(h *host.Host, mc config.MachineConfig) error { +func postStartSetup(h *host.Host, mc config.ClusterConfig) error { glog.Infof("post-start starting for %q (driver=%q)", h.Name, h.DriverName) start := time.Now() defer func() { @@ -225,7 +225,7 @@ func acquireMachinesLock(name string) (mutex.Releaser, error) { } // showHostInfo shows host information -func showHostInfo(cfg config.MachineConfig) { +func showHostInfo(cfg config.ClusterConfig) { if driver.BareMetal(cfg.Driver) { info, err := getHostInfo() if err == nil { diff --git a/pkg/minikube/node/config.go b/pkg/minikube/node/config.go index ba4c7f5275..ce76ded6b4 100644 --- a/pkg/minikube/node/config.go +++ b/pkg/minikube/node/config.go @@ -81,7 +81,7 @@ func showVersionInfo(k8sVersion 
string, cr cruntime.Manager) { } // setupKubeAdm adds any requested files into the VM before Kubernetes is started -func setupKubeAdm(mAPI libmachine.API, cfg config.MachineConfig, node config.Node) bootstrapper.Bootstrapper { +func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, node config.Node) bootstrapper.Bootstrapper { bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper)) if err != nil { exit.WithError("Failed to get bootstrapper", err) @@ -99,7 +99,7 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.MachineConfig, node config.Nod return bs } -func setupKubeconfig(h *host.Host, c *config.MachineConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { +func setupKubeconfig(h *host.Host, c *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { addr, err := h.Driver.GetURL() if err != nil { exit.WithError("Failed to get driver URL", err) diff --git a/pkg/minikube/node/machine.go b/pkg/minikube/node/machine.go index d66e61510e..d0c4021222 100644 --- a/pkg/minikube/node/machine.go +++ b/pkg/minikube/node/machine.go @@ -39,7 +39,7 @@ import ( "k8s.io/minikube/pkg/util/retry" ) -func startMachine(cfg *config.MachineConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { +func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { m, err := machine.NewAPIClient() if err != nil { exit.WithError("Failed to get machine client", err) @@ -68,7 +68,7 @@ func startMachine(cfg *config.MachineConfig, node *config.Node) (runner command. 
} // startHost starts a new minikube host using a VM or None -func startHost(api libmachine.API, mc config.MachineConfig) (*host.Host, bool) { +func startHost(api libmachine.API, mc config.ClusterConfig) (*host.Host, bool) { exists, err := api.Exists(mc.Name) if err != nil { exit.WithError("Failed to check if machine exists", err) diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 19b51d1770..4e2f75b94b 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -38,18 +38,19 @@ const ( ) // Add adds a new node config to an existing cluster. -func Add(cc *config.MachineConfig, name string, controlPlane bool, worker bool, k8sVersion string, profileName string) (*config.Node, error) { +func Add(cc *config.ClusterConfig, name string, controlPlane bool, worker bool, k8sVersion string, profileName string) (*config.Node, error) { n := config.Node{ Name: name, Worker: true, } + // TODO: Deal with parameters better. Ideally we should be able to acceot any node-specific minikube start params here. 
if controlPlane { n.ControlPlane = true } - if worker { - n.Worker = true + if !worker { + n.Worker = false } if k8sVersion != "" { @@ -69,7 +70,7 @@ func Add(cc *config.MachineConfig, name string, controlPlane bool, worker bool, } // Delete stops and deletes the given node from the given cluster -func Delete(cc config.MachineConfig, name string) error { +func Delete(cc config.ClusterConfig, name string) error { _, index, err := Retrieve(&cc, name) if err != nil { return err @@ -95,7 +96,7 @@ func Delete(cc config.MachineConfig, name string) error { } // Retrieve finds the node by name in the given cluster -func Retrieve(cc *config.MachineConfig, name string) (*config.Node, int, error) { +func Retrieve(cc *config.ClusterConfig, name string) (*config.Node, int, error) { for i, n := range cc.Nodes { if n.Name == name { return &n, i, nil @@ -106,7 +107,7 @@ func Retrieve(cc *config.MachineConfig, name string) (*config.Node, int, error) } // Save saves a node to a cluster -func Save(cfg *config.MachineConfig, node *config.Node) error { +func Save(cfg *config.ClusterConfig, node *config.Node) error { update := false for i, n := range cfg.Nodes { if n.Name == node.Name { diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 07d14d4525..1fe966e5ac 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -33,7 +33,7 @@ import ( ) // Start spins up a guest and starts the kubernetes node. -func Start(mc config.MachineConfig, n config.Node, primary bool, existingAddons map[string]bool) (*kubeconfig.Settings, error) { +func Start(mc config.ClusterConfig, n config.Node, primary bool, existingAddons map[string]bool) (*kubeconfig.Settings, error) { // Now that the ISO is downloaded, pull images in the background while the VM boots. 
var cacheGroup errgroup.Group beginCacheRequiredImages(&cacheGroup, mc.KubernetesConfig.ImageRepository, n.KubernetesVersion) diff --git a/pkg/minikube/registry/drvs/docker/docker.go b/pkg/minikube/registry/drvs/docker/docker.go index 38cc95acb7..0b66dfdecb 100644 --- a/pkg/minikube/registry/drvs/docker/docker.go +++ b/pkg/minikube/registry/drvs/docker/docker.go @@ -43,7 +43,7 @@ func init() { } } -func configure(mc config.MachineConfig) (interface{}, error) { +func configure(mc config.ClusterConfig) (interface{}, error) { return kic.NewDriver(kic.Config{ MachineName: mc.Name, StorePath: localpath.MiniPath(), diff --git a/pkg/minikube/registry/drvs/hyperkit/hyperkit.go b/pkg/minikube/registry/drvs/hyperkit/hyperkit.go index 50a4e5a408..47a3db9091 100644 --- a/pkg/minikube/registry/drvs/hyperkit/hyperkit.go +++ b/pkg/minikube/registry/drvs/hyperkit/hyperkit.go @@ -57,7 +57,7 @@ func init() { } } -func configure(config cfg.MachineConfig) (interface{}, error) { +func configure(config cfg.ClusterConfig) (interface{}, error) { u := config.UUID if u == "" { u = uuid.NewUUID().String() diff --git a/pkg/minikube/registry/drvs/hyperv/hyperv.go b/pkg/minikube/registry/drvs/hyperv/hyperv.go index 9f15d0c470..89f63c93f3 100644 --- a/pkg/minikube/registry/drvs/hyperv/hyperv.go +++ b/pkg/minikube/registry/drvs/hyperv/hyperv.go @@ -52,7 +52,7 @@ func init() { } } -func configure(config cfg.MachineConfig) (interface{}, error) { +func configure(config cfg.ClusterConfig) (interface{}, error) { d := hyperv.NewDriver(config.Name, localpath.MiniPath()) d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) d.VSwitch = config.HypervVirtualSwitch diff --git a/pkg/minikube/registry/drvs/kvm2/kvm2.go b/pkg/minikube/registry/drvs/kvm2/kvm2.go index dedad73bfb..a3dbf67193 100644 --- a/pkg/minikube/registry/drvs/kvm2/kvm2.go +++ b/pkg/minikube/registry/drvs/kvm2/kvm2.go @@ -67,7 +67,7 @@ type kvmDriver struct { ConnectionURI string } -func configure(mc config.MachineConfig) 
(interface{}, error) { +func configure(mc config.ClusterConfig) (interface{}, error) { name := mc.Name return kvmDriver{ BaseDriver: &drivers.BaseDriver{ diff --git a/pkg/minikube/registry/drvs/none/none.go b/pkg/minikube/registry/drvs/none/none.go index aa8523cab6..4e1ae1a794 100644 --- a/pkg/minikube/registry/drvs/none/none.go +++ b/pkg/minikube/registry/drvs/none/none.go @@ -42,7 +42,7 @@ func init() { } } -func configure(mc config.MachineConfig) (interface{}, error) { +func configure(mc config.ClusterConfig) (interface{}, error) { return none.NewDriver(none.Config{ MachineName: mc.Name, StorePath: localpath.MiniPath(), diff --git a/pkg/minikube/registry/drvs/parallels/parallels.go b/pkg/minikube/registry/drvs/parallels/parallels.go index de319ec8fb..79d0e9085e 100644 --- a/pkg/minikube/registry/drvs/parallels/parallels.go +++ b/pkg/minikube/registry/drvs/parallels/parallels.go @@ -44,7 +44,7 @@ func init() { } -func configure(config cfg.MachineConfig) (interface{}, error) { +func configure(config cfg.ClusterConfig) (interface{}, error) { d := parallels.NewDriver(config.Name, localpath.MiniPath()).(*parallels.Driver) d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) d.Memory = config.Memory diff --git a/pkg/minikube/registry/drvs/podman/podman.go b/pkg/minikube/registry/drvs/podman/podman.go index a8e19dbbc0..ec5d6013ac 100644 --- a/pkg/minikube/registry/drvs/podman/podman.go +++ b/pkg/minikube/registry/drvs/podman/podman.go @@ -49,7 +49,7 @@ func init() { } } -func configure(mc config.MachineConfig) (interface{}, error) { +func configure(mc config.ClusterConfig) (interface{}, error) { return kic.NewDriver(kic.Config{ MachineName: mc.Name, StorePath: localpath.MiniPath(), diff --git a/pkg/minikube/registry/drvs/virtualbox/virtualbox.go b/pkg/minikube/registry/drvs/virtualbox/virtualbox.go index bfba0e42db..c3888c3758 100644 --- a/pkg/minikube/registry/drvs/virtualbox/virtualbox.go +++ b/pkg/minikube/registry/drvs/virtualbox/virtualbox.go @@ 
-49,7 +49,7 @@ func init() { } } -func configure(mc config.MachineConfig) (interface{}, error) { +func configure(mc config.ClusterConfig) (interface{}, error) { d := virtualbox.NewDriver(mc.Name, localpath.MiniPath()) d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO) d.Memory = mc.Memory diff --git a/pkg/minikube/registry/drvs/vmware/vmware.go b/pkg/minikube/registry/drvs/vmware/vmware.go index 885063cde2..0333dce541 100644 --- a/pkg/minikube/registry/drvs/vmware/vmware.go +++ b/pkg/minikube/registry/drvs/vmware/vmware.go @@ -39,7 +39,7 @@ func init() { } } -func configure(mc config.MachineConfig) (interface{}, error) { +func configure(mc config.ClusterConfig) (interface{}, error) { d := vmwcfg.NewConfig(mc.Name, localpath.MiniPath()) d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO) d.Memory = mc.Memory diff --git a/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go b/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go index bb5ed4196b..524e50f88c 100644 --- a/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go +++ b/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go @@ -44,7 +44,7 @@ func init() { } } -func configure(config cfg.MachineConfig) (interface{}, error) { +func configure(config cfg.ClusterConfig) (interface{}, error) { d := vmwarefusion.NewDriver(config.Name, localpath.MiniPath()).(*vmwarefusion.Driver) d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) d.Memory = config.Memory diff --git a/pkg/minikube/registry/registry.go b/pkg/minikube/registry/registry.go index 159c7a4568..e5fb98ce51 100644 --- a/pkg/minikube/registry/registry.go +++ b/pkg/minikube/registry/registry.go @@ -60,7 +60,7 @@ type Registry interface { } // Configurator emits a struct to be marshalled into JSON for Machine Driver -type Configurator func(config.MachineConfig) (interface{}, error) +type Configurator func(config.ClusterConfig) (interface{}, error) // Loader is a function that loads a byte stream and creates a 
driver. type Loader func() drivers.Driver diff --git a/pkg/minikube/tunnel/cluster_inspector.go b/pkg/minikube/tunnel/cluster_inspector.go index bb86db778a..8f9001e3db 100644 --- a/pkg/minikube/tunnel/cluster_inspector.go +++ b/pkg/minikube/tunnel/cluster_inspector.go @@ -64,7 +64,7 @@ func (m *clusterInspector) getStateAndRoute() (HostState, *Route, error) { if err != nil { return hostState, nil, err } - var c *config.MachineConfig + var c *config.ClusterConfig c, err = m.configLoader.LoadConfigFromFile(m.machineName) if err != nil { err = errors.Wrapf(err, "error loading config for %s", m.machineName) @@ -80,7 +80,7 @@ func (m *clusterInspector) getStateAndRoute() (HostState, *Route, error) { return hostState, route, nil } -func getRoute(host *host.Host, clusterConfig config.MachineConfig) (*Route, error) { +func getRoute(host *host.Host, clusterConfig config.ClusterConfig) (*Route, error) { hostDriverIP, err := host.Driver.GetIP() if err != nil { return nil, errors.Wrapf(err, "error getting host IP for %s", host.Name) diff --git a/pkg/minikube/tunnel/cluster_inspector_test.go b/pkg/minikube/tunnel/cluster_inspector_test.go index c3be2e2483..834bd8241d 100644 --- a/pkg/minikube/tunnel/cluster_inspector_test.go +++ b/pkg/minikube/tunnel/cluster_inspector_test.go @@ -66,7 +66,7 @@ func TestMinikubeCheckReturnsHostInformation(t *testing.T) { } configLoader := &stubConfigLoader{ - c: &config.MachineConfig{ + c: &config.ClusterConfig{ KubernetesConfig: config.KubernetesConfig{ ServiceCIDR: "96.0.0.0/12", }, @@ -104,7 +104,7 @@ func TestMinikubeCheckReturnsHostInformation(t *testing.T) { } func TestUnparseableCIDR(t *testing.T) { - cfg := config.MachineConfig{ + cfg := config.ClusterConfig{ KubernetesConfig: config.KubernetesConfig{ ServiceCIDR: "bad.cidr.0.0/12", }} @@ -124,7 +124,7 @@ func TestUnparseableCIDR(t *testing.T) { func TestRouteIPDetection(t *testing.T) { expectedTargetCIDR := "10.96.0.0/12" - cfg := config.MachineConfig{ + cfg := config.ClusterConfig{ 
KubernetesConfig: config.KubernetesConfig{ ServiceCIDR: expectedTargetCIDR, }, diff --git a/pkg/minikube/tunnel/test_doubles.go b/pkg/minikube/tunnel/test_doubles.go index 5ac4874593..b8a8ae009f 100644 --- a/pkg/minikube/tunnel/test_doubles.go +++ b/pkg/minikube/tunnel/test_doubles.go @@ -82,14 +82,14 @@ func (r *fakeRouter) Inspect(route *Route) (exists bool, conflict string, overla } type stubConfigLoader struct { - c *config.MachineConfig + c *config.ClusterConfig e error } -func (l *stubConfigLoader) WriteConfigToFile(profileName string, cc *config.MachineConfig, miniHome ...string) error { +func (l *stubConfigLoader) WriteConfigToFile(profileName string, cc *config.ClusterConfig, miniHome ...string) error { return l.e } -func (l *stubConfigLoader) LoadConfigFromFile(profile string, miniHome ...string) (*config.MachineConfig, error) { +func (l *stubConfigLoader) LoadConfigFromFile(profile string, miniHome ...string) (*config.ClusterConfig, error) { return l.c, l.e } diff --git a/pkg/minikube/tunnel/tunnel_test.go b/pkg/minikube/tunnel/tunnel_test.go index 20048f36d0..c017fd7aac 100644 --- a/pkg/minikube/tunnel/tunnel_test.go +++ b/pkg/minikube/tunnel/tunnel_test.go @@ -423,7 +423,7 @@ func TestTunnel(t *testing.T) { }, } configLoader := &stubConfigLoader{ - c: &config.MachineConfig{ + c: &config.ClusterConfig{ KubernetesConfig: config.KubernetesConfig{ ServiceCIDR: tc.serviceCIDR, }}, @@ -478,7 +478,7 @@ func TestErrorCreatingTunnel(t *testing.T) { } configLoader := &stubConfigLoader{ - c: &config.MachineConfig{ + c: &config.ClusterConfig{ KubernetesConfig: config.KubernetesConfig{ ServiceCIDR: "10.96.0.0/12", }}, diff --git a/site/content/en/docs/Contributing/drivers.en.md b/site/content/en/docs/Contributing/drivers.en.md index aecd0e1179..6c3f4a74a8 100644 --- a/site/content/en/docs/Contributing/drivers.en.md +++ b/site/content/en/docs/Contributing/drivers.en.md @@ -85,7 +85,7 @@ func init() { }) } -func createVMwareFusionHost(config cfg.MachineConfig) 
interface{} { +func createVMwareFusionHost(config cfg.ClusterConfig) interface{} { d := vmwarefusion.NewDriver(config.Name, localpath.MiniPath()).(*vmwarefusion.Driver) d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) d.Memory = config.Memory From 3bccafb66bb644e2a77aaa0da93f20df1cc557d3 Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Wed, 19 Feb 2020 20:23:06 -0500 Subject: [PATCH 017/668] Update hyperv.go --- pkg/minikube/registry/drvs/hyperv/hyperv.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/registry/drvs/hyperv/hyperv.go b/pkg/minikube/registry/drvs/hyperv/hyperv.go index f17b9517fe..9f15d0c470 100644 --- a/pkg/minikube/registry/drvs/hyperv/hyperv.go +++ b/pkg/minikube/registry/drvs/hyperv/hyperv.go @@ -88,7 +88,7 @@ func status() registry.State { // Allow no more than 2 seconds for querying state ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - + cmd := exec.CommandContext(ctx, path, "Get-WindowsOptionalFeature", "-FeatureName", "Microsoft-Hyper-V-All", "-Online") out, err := cmd.CombinedOutput() if err != nil { From ec191119dfebacf9ef385e745c4c1bde1982e542 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 21 Feb 2020 15:46:02 -0800 Subject: [PATCH 018/668] let's move some start code around --- cmd/minikube/cmd/delete.go | 24 +++++---- cmd/minikube/cmd/node_add.go | 2 +- cmd/minikube/cmd/node_start.go | 2 +- cmd/minikube/cmd/start.go | 8 +-- pkg/minikube/bootstrapper/bootstrapper.go | 1 + pkg/minikube/cluster/cluster.go | 3 ++ pkg/minikube/node/config.go | 63 ++--------------------- pkg/minikube/node/machine.go | 3 +- pkg/minikube/node/start.go | 29 +++-------- 9 files changed, 36 insertions(+), 99 deletions(-) diff --git a/cmd/minikube/cmd/delete.go b/cmd/minikube/cmd/delete.go index f58bd5440e..324d6d6403 100644 --- a/cmd/minikube/cmd/delete.go +++ b/cmd/minikube/cmd/delete.go @@ -191,14 +191,20 @@ func deleteProfile(profile *pkg_config.Profile) error { 
} if err == nil && driver.BareMetal(cc.Driver) { - if err := uninstallKubernetes(api, profile.Name, cc.KubernetesConfig, viper.GetString(cmdcfg.Bootstrapper)); err != nil { - deletionError, ok := err.(DeletionError) - if ok { - delErr := profileDeletionErr(profile.Name, fmt.Sprintf("%v", err)) - deletionError.Err = delErr - return deletionError + var e error + for _, n := range cc.Nodes { + if err := uninstallKubernetes(api, profile.Name, cc.KubernetesConfig, viper.GetString(cmdcfg.Bootstrapper), n.Name); err != nil { + deletionError, ok := err.(DeletionError) + if ok { + delErr := profileDeletionErr(profile.Name, fmt.Sprintf("%v", err)) + deletionError.Err = delErr + e = deletionError + } + e = err } - return err + } + if e != nil { + return e } } @@ -272,9 +278,9 @@ func profileDeletionErr(profileName string, additionalInfo string) error { return fmt.Errorf("error deleting profile \"%s\": %s", profileName, additionalInfo) } -func uninstallKubernetes(api libmachine.API, profile string, kc pkg_config.KubernetesConfig, bsName string) error { +func uninstallKubernetes(api libmachine.API, profile string, kc pkg_config.KubernetesConfig, bsName string, nodeName string) error { out.T(out.Resetting, "Uninstalling Kubernetes {{.kubernetes_version}} using {{.bootstrapper_name}} ...", out.V{"kubernetes_version": kc.KubernetesVersion, "bootstrapper_name": bsName}) - clusterBootstrapper, err := cluster.Bootstrapper(api, bsName) + clusterBootstrapper, err := cluster.Bootstrapper(api, bsName, nodeName) if err != nil { return DeletionError{Err: fmt.Errorf("unable to get bootstrapper: %v", err), Errtype: Fatal} } diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 239c1df7ec..7d6b0841c6 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -54,7 +54,7 @@ var nodeAddCmd = &cobra.Command{ exit.WithError("Error adding node to cluster", err) } - _, err = node.Start(*mc, *n, false, nil) + err = node.Start(*mc, *n, false, nil) if err 
!= nil { exit.WithError("Error starting node", err) } diff --git a/cmd/minikube/cmd/node_start.go b/cmd/minikube/cmd/node_start.go index d62cdf7ef1..c08c8b1037 100644 --- a/cmd/minikube/cmd/node_start.go +++ b/cmd/minikube/cmd/node_start.go @@ -61,7 +61,7 @@ var nodeStartCmd = &cobra.Command{ } // Start it up baby - _, err = node.Start(*cc, *n, false, nil) + err = node.Start(*cc, *n, true, nil) if err != nil { out.FatalT("Failed to start node {{.name}}", out.V{"name": name}) } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 72d83bd549..3154c2370e 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -174,7 +174,7 @@ func initMinikubeFlags() { // initKubernetesFlags inits the commandline flags for kubernetes related options func initKubernetesFlags() { startCmd.Flags().String(kubernetesVersion, "", "The kubernetes version that the minikube VM will use (ex: v1.2.3)") - startCmd.Flags().Var(&node.ExtraOptions, "extra-config", + startCmd.Flags().Var(&cluster.ExtraOptions, "extra-config", `A set of key=value pairs that describe configuration that may be passed to different components. The key should be '.' separated, and the first part before the dot is the component to apply the configuration to. 
Valid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, proxy, scheduler @@ -688,7 +688,7 @@ func validateFlags(cmd *cobra.Command, drvName string) { validateCPUCount(driver.BareMetal(drvName)) // check that kubeadm extra args contain only whitelisted parameters - for param := range node.ExtraOptions.AsMap().Get(bsutil.Kubeadm) { + for param := range cluster.ExtraOptions.AsMap().Get(bsutil.Kubeadm) { if !config.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmCmdParam], param) && !config.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmConfigParam], param) { exit.UsageT("Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config", out.V{"parameter_name": param}) @@ -821,7 +821,7 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) NetworkPlugin: selectedNetworkPlugin, ServiceCIDR: viper.GetString(serviceCIDR), ImageRepository: repository, - ExtraOptions: node.ExtraOptions, + ExtraOptions: cluster.ExtraOptions, ShouldLoadCachedImages: viper.GetBool(cacheImages), EnableDefaultCNI: selectedEnableDefaultCNI, }, @@ -855,7 +855,7 @@ func autoSetDriverOptions(cmd *cobra.Command, drvName string) (err error) { if !cmd.Flags().Changed("extra-config") && len(hints.ExtraOptions) > 0 { for _, eo := range hints.ExtraOptions { glog.Infof("auto setting extra-config to %q.", eo) - err = node.ExtraOptions.Set(eo) + err = cluster.ExtraOptions.Set(eo) if err != nil { err = errors.Wrapf(err, "setting extra option %s", eo) } diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index eba5167179..d09bef5521 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -40,6 +40,7 @@ type Bootstrapper interface { DeleteCluster(config.KubernetesConfig) error WaitForCluster(config.ClusterConfig, time.Duration) error JoinCluster(config.ClusterConfig, config.Node, string) error + 
UpdateNode(config.ClusterConfig) // LogCommands returns a map of log type to a command which will display that log. LogCommands(LogOptions) map[string]string SetupCerts(config.KubernetesConfig, config.Node) error diff --git a/pkg/minikube/cluster/cluster.go b/pkg/minikube/cluster/cluster.go index a2b9e06613..baafe689a7 100644 --- a/pkg/minikube/cluster/cluster.go +++ b/pkg/minikube/cluster/cluster.go @@ -26,9 +26,12 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper" "k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" ) +var ExtraOptions config.ExtraOptionSlice + // This init function is used to set the logtostderr variable to false so that INFO level log info does not clutter the CLI // INFO lvl logging is displayed due to the kubernetes api calling flag.Set("logtostderr", "true") in its init() // see: https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/util/logs/logs.go#L32-L34 diff --git a/pkg/minikube/node/config.go b/pkg/minikube/node/config.go index ce76ded6b4..eb1a61f68c 100644 --- a/pkg/minikube/node/config.go +++ b/pkg/minikube/node/config.go @@ -22,31 +22,23 @@ import ( "os/exec" "path/filepath" "strconv" - "strings" - "github.com/docker/machine/libmachine" - "github.com/docker/machine/libmachine/host" "github.com/golang/glog" "github.com/spf13/viper" - cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" - "k8s.io/minikube/pkg/minikube/bootstrapper" - "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/util/lock" ) var ( - DockerEnv []string - DockerOpt []string - ExtraOptions config.ExtraOptionSlice - AddonList []string + DockerEnv []string + 
DockerOpt []string + AddonList []string ) // configureRuntimes does what needs to happen to get a runtime going. @@ -80,55 +72,6 @@ func showVersionInfo(k8sVersion string, cr cruntime.Manager) { } } -// setupKubeAdm adds any requested files into the VM before Kubernetes is started -func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, node config.Node) bootstrapper.Bootstrapper { - bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper)) - if err != nil { - exit.WithError("Failed to get bootstrapper", err) - } - for _, eo := range ExtraOptions { - out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value}) - } - // Loads cached images, generates config files, download binaries - if err := bs.UpdateCluster(cfg); err != nil { - exit.WithError("Failed to update cluster", err) - } - if err := bs.SetupCerts(cfg.KubernetesConfig, node); err != nil { - exit.WithError("Failed to setup certs", err) - } - return bs -} - -func setupKubeconfig(h *host.Host, c *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { - addr, err := h.Driver.GetURL() - if err != nil { - exit.WithError("Failed to get driver URL", err) - } - if !driver.IsKIC(h.DriverName) { - addr = strings.Replace(addr, "tcp://", "https://", -1) - addr = strings.Replace(addr, ":2376", ":"+strconv.Itoa(n.Port), -1) - } - - if c.KubernetesConfig.APIServerName != constants.APIServerName { - addr = strings.Replace(addr, n.IP, c.KubernetesConfig.APIServerName, -1) - } - kcs := &kubeconfig.Settings{ - ClusterName: clusterName, - ClusterServerAddress: addr, - ClientCertificate: localpath.MakeMiniPath("client.crt"), - ClientKey: localpath.MakeMiniPath("client.key"), - CertificateAuthority: localpath.MakeMiniPath("ca.crt"), - KeepContext: viper.GetBool(keepContext), - EmbedCerts: viper.GetBool(embedCerts), - } - - kcs.SetPath(kubeconfig.PathFromEnv()) - if err 
:= kubeconfig.Update(kcs); err != nil { - return kcs, err - } - return kcs, nil -} - // configureMounts configures any requested filesystem mounts func configureMounts() { if !viper.GetBool(createMount) { diff --git a/pkg/minikube/node/machine.go b/pkg/minikube/node/machine.go index d0c4021222..279e233c2a 100644 --- a/pkg/minikube/node/machine.go +++ b/pkg/minikube/node/machine.go @@ -39,7 +39,8 @@ import ( "k8s.io/minikube/pkg/util/retry" ) -func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { +// StartMachine starts a VM +func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { m, err := machine.NewAPIClient() if err != nil { exit.WithError("Failed to get machine client", err) diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 1fe966e5ac..353c5ae4e9 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -22,18 +22,17 @@ import ( "github.com/spf13/viper" "golang.org/x/sync/errgroup" "k8s.io/minikube/pkg/addons" + "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/localpath" - "k8s.io/minikube/pkg/minikube/logs" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/util" ) // Start spins up a guest and starts the kubernetes node. -func Start(mc config.ClusterConfig, n config.Node, primary bool, existingAddons map[string]bool) (*kubeconfig.Settings, error) { +func Start(mc config.ClusterConfig, n config.Node, preExists bool, existingAddons map[string]bool) error { // Now that the ISO is downloaded, pull images in the background while the VM boots. 
var cacheGroup errgroup.Group beginCacheRequiredImages(&cacheGroup, mc.KubernetesConfig.ImageRepository, n.KubernetesVersion) @@ -44,33 +43,17 @@ func Start(mc config.ClusterConfig, n config.Node, primary bool, existingAddons exit.WithError("Failed to save config", err) } + bs, err := cluster.Bootstrapper() + k8sVersion := mc.KubernetesConfig.KubernetesVersion driverName := mc.Driver // exits here in case of --download-only option. handleDownloadOnly(&cacheGroup, k8sVersion) - mRunner, preExists, machineAPI, host := startMachine(&mc, &n) - defer machineAPI.Close() // configure the runtime (docker, containerd, crio) cr := configureRuntimes(mRunner, driverName, mc.KubernetesConfig) showVersionInfo(k8sVersion, cr) waitCacheRequiredImages(&cacheGroup) - //TODO(sharifelgamal): Part out the cluster-wide operations, perhaps using the "primary" param - - // Must be written before bootstrap, otherwise health checks may flake due to stale IP - kubeconfig, err := setupKubeconfig(host, &mc, &n, mc.Name) - if err != nil { - exit.WithError("Failed to setup kubeconfig", err) - } - - // setup kubeadm (must come after setupKubeconfig) - bs := setupKubeAdm(machineAPI, mc, n) - - // pull images or restart cluster - out.T(out.Launch, "Launching Kubernetes ... ") - if err := bs.StartCluster(mc); err != nil { - exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner)) - } configureMounts() // enable addons, both old and new! 
@@ -80,7 +63,7 @@ func Start(mc config.ClusterConfig, n config.Node, primary bool, existingAddons } addons.Start(viper.GetString(config.MachineProfile), ea, AddonList) - if err = CacheAndLoadImagesInConfig(); err != nil { + if err := CacheAndLoadImagesInConfig(); err != nil { out.T(out.FailureType, "Unable to load cached images from config file.") } @@ -96,7 +79,7 @@ func Start(mc config.ClusterConfig, n config.Node, primary bool, existingAddons } } - return kubeconfig, nil + return nil } // prepareNone prepares the user and host for the joy of the "none" driver From 785338737d11f27ef5ad421714b0a1e37c42f2a2 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 21 Feb 2020 16:06:11 -0800 Subject: [PATCH 019/668] add new setup cluster file --- pkg/minikube/cluster/setup.go | 124 ++++++++++++++++++++++++++++++++++ 1 file changed, 124 insertions(+) create mode 100644 pkg/minikube/cluster/setup.go diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go new file mode 100644 index 0000000000..9ce35bcf9c --- /dev/null +++ b/pkg/minikube/cluster/setup.go @@ -0,0 +1,124 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cluster + +import ( + "strconv" + "strings" + + "github.com/docker/machine/libmachine" + "github.com/docker/machine/libmachine/host" + "github.com/spf13/viper" + cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" + "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/cruntime" + "k8s.io/minikube/pkg/minikube/driver" + "k8s.io/minikube/pkg/minikube/exit" + "k8s.io/minikube/pkg/minikube/kubeconfig" + "k8s.io/minikube/pkg/minikube/localpath" + "k8s.io/minikube/pkg/minikube/logs" + "k8s.io/minikube/pkg/minikube/out" +) + +const ( + waitTimeout = "wait-timeout" + waitUntilHealthy = "wait" + embedCerts = "embed-certs" + keepContext = "keep-context" +) + +// InitialSetup performs all necessary operations on the initial control plane node when first spinning up a cluster +func InitialSetup(cc config.ClusterConfig, n config.Node, cr cruntime.Manager) (*kubeconfig.Settings, error) { + mRunner, preExists, machineAPI, host := StartMachine(&cc, &n) + defer machineAPI.Close() + + // Must be written before bootstrap, otherwise health checks may flake due to stale IP + kubeconfig, err := setupKubeconfig(host, &cc, &n, cc.Name) + if err != nil { + exit.WithError("Failed to setup kubeconfig", err) + } + + // setup kubeadm (must come after setupKubeconfig) + bs := setupKubeAdm(machineAPI, cc, n) + + // pull images or restart cluster + out.T(out.Launch, "Launching Kubernetes ... 
") + if err := bs.StartCluster(cc); err != nil { + exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner)) + } + + // Skip pre-existing, because we already waited for health + if viper.GetBool(waitUntilHealthy) && !preExists { + if err := bs.WaitForCluster(cc, viper.GetDuration(waitTimeout)); err != nil { + exit.WithError("Wait failed", err) + } + } + + return kubeconfig, nil + +} + +// setupKubeAdm adds any requested files into the VM before Kubernetes is started +func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) bootstrapper.Bootstrapper { + bs, err := Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), n.Name) + if err != nil { + exit.WithError("Failed to get bootstrapper", err) + } + for _, eo := range ExtraOptions { + out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value}) + } + // Loads cached images, generates config files, download binaries + if err := bs.UpdateCluster(cfg); err != nil { + exit.WithError("Failed to update cluster", err) + } + if err := bs.SetupCerts(cfg.KubernetesConfig, n); err != nil { + exit.WithError("Failed to setup certs", err) + } + return bs +} + +func setupKubeconfig(h *host.Host, c *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { + addr, err := h.Driver.GetURL() + if err != nil { + exit.WithError("Failed to get driver URL", err) + } + if !driver.IsKIC(h.DriverName) { + addr = strings.Replace(addr, "tcp://", "https://", -1) + addr = strings.Replace(addr, ":2376", ":"+strconv.Itoa(n.Port), -1) + } + + if c.KubernetesConfig.APIServerName != constants.APIServerName { + addr = strings.Replace(addr, n.IP, c.KubernetesConfig.APIServerName, -1) + } + kcs := &kubeconfig.Settings{ + ClusterName: clusterName, + ClusterServerAddress: addr, + ClientCertificate: localpath.MakeMiniPath("client.crt"), + ClientKey: 
localpath.MakeMiniPath("client.key"), + CertificateAuthority: localpath.MakeMiniPath("ca.crt"), + KeepContext: viper.GetBool(keepContext), + EmbedCerts: viper.GetBool(embedCerts), + } + + kcs.SetPath(kubeconfig.PathFromEnv()) + if err := kubeconfig.Update(kcs); err != nil { + return kcs, err + } + return kcs, nil +} From f22efd871aa7173f20ba41dc7e43735daa8dc051 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Sun, 23 Feb 2020 22:41:08 -0800 Subject: [PATCH 020/668] mostly moving code around and adding UpdateNode --- cmd/minikube/cmd/node_add.go | 7 +- cmd/minikube/cmd/node_start.go | 2 +- cmd/minikube/cmd/start.go | 39 ++-- pkg/minikube/bootstrapper/bootstrapper.go | 4 +- pkg/minikube/bootstrapper/bsutil/ops.go | 2 +- pkg/minikube/bootstrapper/certs.go | 6 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 34 +++- pkg/minikube/cluster/cluster.go | 3 - pkg/minikube/cluster/setup.go | 177 +++++++++++++++++- pkg/minikube/config/node.go | 11 ++ pkg/minikube/config/profile.go | 18 ++ pkg/minikube/machine/cluster_test.go | 27 ++- pkg/minikube/machine/fix.go | 6 +- pkg/minikube/machine/start.go | 12 +- pkg/minikube/node/config.go | 67 +------ pkg/minikube/node/machine.go | 185 ------------------- pkg/minikube/node/node.go | 10 +- pkg/minikube/node/start.go | 47 +++-- 18 files changed, 334 insertions(+), 323 deletions(-) delete mode 100644 pkg/minikube/node/machine.go diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 7d6b0841c6..cf3a1c626e 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -49,16 +49,11 @@ var nodeAddCmd = &cobra.Command{ } out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile}) - n, err := node.Add(mc, name, cp, worker, "", profile) + err = node.Add(mc, name, cp, worker, "", profile) if err != nil { exit.WithError("Error adding node to cluster", err) } - err = node.Start(*mc, *n, false, nil) - if err != nil { - exit.WithError("Error starting 
node", err) - } - out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": profile}) }, } diff --git a/cmd/minikube/cmd/node_start.go b/cmd/minikube/cmd/node_start.go index c08c8b1037..9d17ab1b7f 100644 --- a/cmd/minikube/cmd/node_start.go +++ b/cmd/minikube/cmd/node_start.go @@ -61,7 +61,7 @@ var nodeStartCmd = &cobra.Command{ } // Start it up baby - err = node.Start(*cc, *n, true, nil) + err = node.Start(*cc, *n, nil) if err != nil { out.FatalT("Failed to start node {{.name}}", out.V{"name": name}) } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 1162429547..fbb30b7adb 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -118,6 +118,7 @@ const ( autoUpdate = "auto-update-drivers" hostOnlyNicType = "host-only-nic-type" natNicType = "nat-nic-type" + nodes = "nodes" ) var ( @@ -160,7 +161,7 @@ func initMinikubeFlags() { startCmd.Flags().String(containerRuntime, "docker", "The container runtime to be used (docker, crio, containerd).") startCmd.Flags().Bool(createMount, false, "This will start the mount daemon and automatically mount files into minikube.") startCmd.Flags().String(mountString, constants.DefaultMountDir+":/minikube-host", "The argument to pass the minikube mount command on start.") - startCmd.Flags().StringArrayVar(&node.AddonList, "addons", nil, "Enable addons. see `minikube addons list` for a list of valid addon names.") + startCmd.Flags().StringArrayVar(&config.AddonList, "addons", nil, "Enable addons. see `minikube addons list` for a list of valid addon names.") startCmd.Flags().String(criSocket, "", "The cri socket path to be used.") startCmd.Flags().String(networkPlugin, "", "The name of the network plugin.") startCmd.Flags().Bool(enableDefaultCNI, false, "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). 
Used in conjunction with \"--network-plugin=cni\".") @@ -169,12 +170,13 @@ func initMinikubeFlags() { startCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.") startCmd.Flags().Bool(autoUpdate, true, "If set, automatically updates drivers to the latest version. Defaults to true.") startCmd.Flags().Bool(installAddons, true, "If set, install addons. Defaults to true.") + startCmd.Flags().IntP(nodes, "n", 1, "The number of nodes to spin up. Defaults to 1.") } // initKubernetesFlags inits the commandline flags for kubernetes related options func initKubernetesFlags() { startCmd.Flags().String(kubernetesVersion, "", "The kubernetes version that the minikube VM will use (ex: v1.2.3)") - startCmd.Flags().Var(&cluster.ExtraOptions, "extra-config", + startCmd.Flags().Var(&config.ExtraOptions, "extra-config", `A set of key=value pairs that describe configuration that may be passed to different components. The key should be '.' separated, and the first part before the dot is the component to apply the configuration to. Valid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, proxy, scheduler @@ -226,8 +228,8 @@ func initNetworkingFlags() { startCmd.Flags().String(imageRepository, "", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \"auto\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers") startCmd.Flags().String(imageMirrorCountry, "", "Country code of the image mirror to be used. Leave empty to use the global one. 
For Chinese mainland users, set it to cn.")
 	startCmd.Flags().String(serviceCIDR, constants.DefaultServiceCIDR, "The CIDR to be used for service cluster IPs.")
-	startCmd.Flags().StringArrayVar(&node.DockerEnv, "docker-env", nil, "Environment variables to pass to the Docker daemon. (format: key=value)")
-	startCmd.Flags().StringArrayVar(&node.DockerOpt, "docker-opt", nil, "Specify arbitrary flags to pass to the Docker daemon. (format: key=value)")
+	startCmd.Flags().StringArrayVar(&config.DockerEnv, "docker-env", nil, "Environment variables to pass to the Docker daemon. (format: key=value)")
+	startCmd.Flags().StringArrayVar(&config.DockerOpt, "docker-opt", nil, "Specify arbitrary flags to pass to the Docker daemon. (format: key=value)")
 }
 
 // startCmd represents the start command
@@ -335,7 +337,14 @@ func runStart(cmd *cobra.Command, args []string) {
 			existingAddons = existing.Addons
 		}
 	}
-	kubeconfig, err := node.Start(mc, n, true, existingAddons)
+
+	// Abstraction leakage alert: startHost requires the config to be saved, to satisfy pkg/provision/buildroot.
+	// Hence, saveConfig must be called before startHost, and again afterwards when we know the IP.
+ if err := config.SaveProfile(viper.GetString(config.MachineProfile), &mc); err != nil { + exit.WithError("Failed to save config", err) + } + + kubeconfig, err := cluster.InitialSetup(mc, n, existingAddons) if err != nil { exit.WithError("Starting node", err) } @@ -343,6 +352,14 @@ func runStart(cmd *cobra.Command, args []string) { if err := showKubectlInfo(kubeconfig, k8sVersion, mc.Name); err != nil { glog.Errorf("kubectl info: %v", err) } + + numNodes := viper.GetInt(nodes) + if numNodes > 1 { + for i := 0; i < numNodes-1; i++ { + nodeName := fmt.Sprintf("%s%d", n.Name, i+1) + node.Add(&mc, nodeName, false, true, "", "") + } + } } func updateDriver(driverName string) { @@ -691,7 +708,7 @@ func validateFlags(cmd *cobra.Command, drvName string) { validateCPUCount(driver.BareMetal(drvName)) // check that kubeadm extra args contain only whitelisted parameters - for param := range cluster.ExtraOptions.AsMap().Get(bsutil.Kubeadm) { + for param := range config.ExtraOptions.AsMap().Get(bsutil.Kubeadm) { if !config.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmCmdParam], param) && !config.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmConfigParam], param) { exit.UsageT("Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config", out.V{"parameter_name": param}) @@ -791,8 +808,8 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) HyperkitVSockPorts: viper.GetStringSlice(vsockPorts), NFSShare: viper.GetStringSlice(nfsShare), NFSSharesRoot: viper.GetString(nfsSharesRoot), - DockerEnv: node.DockerEnv, - DockerOpt: node.DockerOpt, + DockerEnv: config.DockerEnv, + DockerOpt: config.DockerOpt, InsecureRegistry: insecureRegistry, RegistryMirror: registryMirror, HostOnlyCIDR: viper.GetString(hostOnlyCIDR), @@ -824,7 +841,7 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) NetworkPlugin: selectedNetworkPlugin, ServiceCIDR: viper.GetString(serviceCIDR), 
ImageRepository: repository, - ExtraOptions: cluster.ExtraOptions, + ExtraOptions: config.ExtraOptions, ShouldLoadCachedImages: viper.GetBool(cacheImages), EnableDefaultCNI: selectedEnableDefaultCNI, }, @@ -846,7 +863,7 @@ func setDockerProxy() { continue } } - node.DockerEnv = append(node.DockerEnv, fmt.Sprintf("%s=%s", k, v)) + config.DockerEnv = append(config.DockerEnv, fmt.Sprintf("%s=%s", k, v)) } } } @@ -858,7 +875,7 @@ func autoSetDriverOptions(cmd *cobra.Command, drvName string) (err error) { if !cmd.Flags().Changed("extra-config") && len(hints.ExtraOptions) > 0 { for _, eo := range hints.ExtraOptions { glog.Infof("auto setting extra-config to %q.", eo) - err = cluster.ExtraOptions.Set(eo) + err = config.ExtraOptions.Set(eo) if err != nil { err = errors.Wrapf(err, "setting extra option %s", eo) } diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index d09bef5521..6bb03fa986 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -23,6 +23,7 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/cruntime" ) // LogOptions are options to be passed to LogCommands @@ -40,7 +41,8 @@ type Bootstrapper interface { DeleteCluster(config.KubernetesConfig) error WaitForCluster(config.ClusterConfig, time.Duration) error JoinCluster(config.ClusterConfig, config.Node, string) error - UpdateNode(config.ClusterConfig) + UpdateNode(config.ClusterConfig, config.Node, cruntime.Manager) error + GenerateToken(config.KubernetesConfig) (string, error) // LogCommands returns a map of log type to a command which will display that log. 
LogCommands(LogOptions) map[string]string SetupCerts(config.KubernetesConfig, config.Node) error diff --git a/pkg/minikube/bootstrapper/bsutil/ops.go b/pkg/minikube/bootstrapper/bsutil/ops.go index bf855a9210..d364aa0748 100644 --- a/pkg/minikube/bootstrapper/bsutil/ops.go +++ b/pkg/minikube/bootstrapper/bsutil/ops.go @@ -47,7 +47,7 @@ func AdjustResourceLimits(c command.Runner) error { return nil } -// ExistingConfig checks if there are config files from possible previous kubernets cluster +// ExistingConfig checks if there are config files from possible previous kubernetes cluster func ExistingConfig(c command.Runner) error { args := append([]string{"ls"}, expectedRemoteArtifacts...) _, err := c.RunCmd(exec.Command("sudo", args...)) diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index bb4bde2a38..7b634970a0 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -121,8 +121,10 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) return errors.Wrap(err, "encoding kubeconfig") } - kubeCfgFile := assets.NewMemoryAsset(data, vmpath.GuestPersistentDir, "kubeconfig", "0644") - copyableFiles = append(copyableFiles, kubeCfgFile) + if n.ControlPlane { + kubeCfgFile := assets.NewMemoryAsset(data, vmpath.GuestPersistentDir, "kubeconfig", "0644") + copyableFiles = append(copyableFiles, kubeCfgFile) + } for _, f := range copyableFiles { if err := cmd.Copy(f); err != nil { diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 5af39eac9b..663e538f8c 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -381,6 +381,20 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC return nil } +// GenerateToken creates a token and returns the appropriate kubeadm join command to run +func (k *Bootstrapper) GenerateToken(k8s 
config.KubernetesConfig) (string, error) { + tokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token create --print-join-command --ttl=0", bsutil.InvokeKubeadm(k8s.KubernetesVersion))) + r, err := k.c.RunCmd(tokenCmd) + if err != nil { + return "", errors.Wrap(err, "generating bootstrap token") + } + joinCmd := r.Stdout.String() + joinCmd = strings.Replace(joinCmd, "kubeadm", bsutil.InvokeKubeadm(k8s.KubernetesVersion), 1) + joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(joinCmd)) + + return joinCmd, nil +} + // DeleteCluster removes the components that were started earlier func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error { version, err := bsutil.ParseKubernetesVersion(k8s.KubernetesVersion) @@ -405,7 +419,7 @@ func (k *Bootstrapper) SetupCerts(k8s config.KubernetesConfig, n config.Node) er return bootstrapper.SetupCerts(k.c, k8s, n) } -// UpdateCluster updates the cluster +// UpdateCluster updates the cluster. func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { images, err := images.Kubeadm(cfg.KubernetesConfig.ImageRepository, cfg.KubernetesConfig.KubernetesVersion) if err != nil { @@ -423,14 +437,24 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { return errors.Wrap(err, "runtime") } - // TODO: multiple nodes - kubeadmCfg, err := bsutil.GenerateKubeadmYAML(cfg, r, cfg.Nodes[0]) + for _, n := range cfg.Nodes { + err := k.UpdateNode(cfg, n, r) + if err != nil { + return errors.Wrap(err, "updating node") + } + } + + return nil +} + +// UpdateNode updates a node. 
+func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cruntime.Manager) error { + kubeadmCfg, err := bsutil.GenerateKubeadmYAML(cfg, r, n) if err != nil { return errors.Wrap(err, "generating kubeadm cfg") } - // TODO: multiple nodes - kubeletCfg, err := bsutil.NewKubeletConfig(cfg, cfg.Nodes[0], r) + kubeletCfg, err := bsutil.NewKubeletConfig(cfg, n, r) if err != nil { return errors.Wrap(err, "generating kubelet config") } diff --git a/pkg/minikube/cluster/cluster.go b/pkg/minikube/cluster/cluster.go index baafe689a7..a2b9e06613 100644 --- a/pkg/minikube/cluster/cluster.go +++ b/pkg/minikube/cluster/cluster.go @@ -26,12 +26,9 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper" "k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" ) -var ExtraOptions config.ExtraOptionSlice - // This init function is used to set the logtostderr variable to false so that INFO level log info does not clutter the CLI // INFO lvl logging is displayed due to the kubernetes api calling flag.Set("logtostderr", "true") in its init() // see: https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/util/logs/logs.go#L32-L34 diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index 9ce35bcf9c..d786312820 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -17,23 +17,33 @@ limitations under the License. 
package cluster import ( + "fmt" + "net" + "os" + "os/exec" "strconv" "strings" + "time" "github.com/docker/machine/libmachine" "github.com/docker/machine/libmachine/host" + "github.com/golang/glog" "github.com/spf13/viper" cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" + "k8s.io/minikube/pkg/addons" "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/bootstrapper/images" + "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" - "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/localpath" - "k8s.io/minikube/pkg/minikube/logs" + "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/proxy" + "k8s.io/minikube/pkg/util/retry" ) const ( @@ -41,11 +51,13 @@ const ( waitUntilHealthy = "wait" embedCerts = "embed-certs" keepContext = "keep-context" + imageRepository = "image-repository" + containerRuntime = "container-runtime" ) // InitialSetup performs all necessary operations on the initial control plane node when first spinning up a cluster -func InitialSetup(cc config.ClusterConfig, n config.Node, cr cruntime.Manager) (*kubeconfig.Settings, error) { - mRunner, preExists, machineAPI, host := StartMachine(&cc, &n) +func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) (*kubeconfig.Settings, error) { + _, preExists, machineAPI, host := StartMachine(&cc, &n) defer machineAPI.Close() // Must be written before bootstrap, otherwise health checks may flake due to stale IP @@ -59,8 +71,17 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, cr cruntime.Manager) ( // pull images or restart cluster out.T(out.Launch, "Launching Kubernetes ... 
") - if err := bs.StartCluster(cc); err != nil { - exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner)) + err = bs.StartCluster(cc) + if err != nil { + /*config := cruntime.Config{Type: viper.GetString(containerRuntime), Runner: mRunner, ImageRepository: cc.KubernetesConfig.ImageRepository, KubernetesVersion: cc.KubernetesConfig.KubernetesVersion} + cr, err := cruntime.New(config) + exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner))*/ + exit.WithError("Error starting cluster", err) + } + + // enable addons, both old and new! + if existingAddons != nil { + addons.Start(viper.GetString(config.MachineProfile), existingAddons, config.AddonList) } // Skip pre-existing, because we already waited for health @@ -80,7 +101,7 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) if err != nil { exit.WithError("Failed to get bootstrapper", err) } - for _, eo := range ExtraOptions { + for _, eo := range config.ExtraOptions { out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value}) } // Loads cached images, generates config files, download binaries @@ -122,3 +143,145 @@ func setupKubeconfig(h *host.Host, c *config.ClusterConfig, n *config.Node, clus } return kcs, nil } + +// StartMachine starts a VM +func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { + m, err := machine.NewAPIClient() + if err != nil { + exit.WithError("Failed to get machine client", err) + } + host, preExists = startHost(m, *cfg, *node) + runner, err = machine.CommandRunner(host) + if err != nil { + exit.WithError("Failed to get command runner", err) + } + + ip := validateNetwork(host, runner) + + // Bypass proxy for minikube's vm host ip + err = proxy.ExcludeIP(ip) + if err != nil { + out.ErrT(out.FailureType, 
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip}) + } + + node.IP = ip + config.SaveNodeToProfile(cfg, node) + + return runner, preExists, m, host +} + +// startHost starts a new minikube host using a VM or None +func startHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, bool) { + exists, err := api.Exists(n.Name) + if err != nil { + exit.WithError("Failed to check if machine exists", err) + } + + host, err := machine.StartHost(api, mc, n) + if err != nil { + exit.WithError("Unable to start VM. Please investigate and run 'minikube delete' if possible", err) + } + return host, exists +} + +// validateNetwork tries to catch network problems as soon as possible +func validateNetwork(h *host.Host, r command.Runner) string { + ip, err := h.Driver.GetIP() + if err != nil { + exit.WithError("Unable to get VM IP address", err) + } + + optSeen := false + warnedOnce := false + for _, k := range proxy.EnvVars { + if v := os.Getenv(k); v != "" { + if !optSeen { + out.T(out.Internet, "Found network options:") + optSeen = true + } + out.T(out.Option, "{{.key}}={{.value}}", out.V{"key": k, "value": v}) + ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY + k = strings.ToUpper(k) // for http_proxy & https_proxy + if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce { + out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). 
Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"}) + warnedOnce = true + } + } + } + + if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) { + trySSH(h, ip) + } + + tryLookup(r) + tryRegistry(r) + return ip +} + +func trySSH(h *host.Host, ip string) { + if viper.GetBool("force") { + return + } + + sshAddr := net.JoinHostPort(ip, "22") + + dial := func() (err error) { + d := net.Dialer{Timeout: 3 * time.Second} + conn, err := d.Dial("tcp", sshAddr) + if err != nil { + out.WarningT("Unable to verify SSH connectivity: {{.error}}. Will retry...", out.V{"error": err}) + return err + } + _ = conn.Close() + return nil + } + + if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil { + exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}} + + This is likely due to one of two reasons: + + - VPN or firewall interference + - {{.hypervisor}} network configuration issue + + Suggested workarounds: + + - Disable your local VPN or firewall software + - Configure your local VPN or firewall to allow access to {{.ip}} + - Restart or reinstall {{.hypervisor}} + - Use an alternative --vm-driver + - Use --force to override this connectivity check + `, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip}) + } +} + +func tryLookup(r command.Runner) { + // DNS check + if rr, err := r.RunCmd(exec.Command("nslookup", "kubernetes.io", "-type=ns")); err != nil { + glog.Infof("%s failed: %v which might be okay will retry nslookup without query type", rr.Args, err) + // will try with without query type for ISOs with different busybox versions. 
+ if _, err = r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil { + glog.Warningf("nslookup failed: %v", err) + out.WarningT("Node may be unable to resolve external DNS records") + } + } +} +func tryRegistry(r command.Runner) { + // Try an HTTPS connection to the image repository + proxy := os.Getenv("HTTPS_PROXY") + opts := []string{"-sS"} + if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") { + opts = append([]string{"-x", proxy}, opts...) + } + + repo := viper.GetString(imageRepository) + if repo == "" { + repo = images.DefaultKubernetesRepo + } + + opts = append(opts, fmt.Sprintf("https://%s/", repo)) + if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil { + glog.Warningf("%s failed: %v", rr.Args, err) + out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo}) + } +} diff --git a/pkg/minikube/config/node.go b/pkg/minikube/config/node.go index 1c6f050159..572a182553 100644 --- a/pkg/minikube/config/node.go +++ b/pkg/minikube/config/node.go @@ -16,6 +16,17 @@ limitations under the License. package config +var ( + // DockerEnv contains the environment variables + DockerEnv []string + // DockerOpt contains the option parameters + DockerOpt []string + // ExtraOptions contains extra options (if any) + ExtraOptions ExtraOptionSlice + // AddonList contains the list of addons + AddonList []string +) + // AddNode adds a new node config to an existing cluster. 
func AddNode(cc *ClusterConfig, name string, controlPlane bool, k8sVersion string, profileName string) error { node := Node{ diff --git a/pkg/minikube/config/profile.go b/pkg/minikube/config/profile.go index 5ad8572e13..0acfe1a8b4 100644 --- a/pkg/minikube/config/profile.go +++ b/pkg/minikube/config/profile.go @@ -25,6 +25,7 @@ import ( "strings" "github.com/golang/glog" + "github.com/spf13/viper" "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/util/lock" @@ -90,6 +91,23 @@ func CreateEmptyProfile(name string, miniHome ...string) error { return SaveProfile(name, cfg, miniHome...) } +// SaveNodeToProfile saves a node to a cluster +func SaveNodeToProfile(cfg *ClusterConfig, node *Node) error { + update := false + for i, n := range cfg.Nodes { + if n.Name == node.Name { + cfg.Nodes[i] = *node + update = true + break + } + } + + if !update { + cfg.Nodes = append(cfg.Nodes, *node) + } + return SaveProfile(viper.GetString(MachineProfile), cfg) +} + // SaveProfile creates an profile out of the cfg and stores in $MINIKUBE_HOME/profiles//config.json func SaveProfile(name string, cfg *ClusterConfig, miniHome ...string) error { data, err := json.MarshalIndent(cfg, "", " ") diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index 3b84c58842..ad326611c0 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -130,8 +130,10 @@ func TestStartHostExists(t *testing.T) { mc := defaultClusterConfig mc.Name = ih.Name + + n := config.Node{Name: ih.Name} // This should pass without calling Create because the host exists already. 
- h, err := StartHost(api, mc) + h, err := StartHost(api, mc, n) if err != nil { t.Fatalf("Error starting host: %v", err) } @@ -162,8 +164,10 @@ func TestStartHostErrMachineNotExist(t *testing.T) { mc := defaultClusterConfig mc.Name = h.Name + n := config.Node{Name: h.Name} + // This should pass with creating host, while machine does not exist. - h, err = StartHost(api, mc) + h, err = StartHost(api, mc, n) if err != nil { if err != ErrorMachineNotExist { t.Fatalf("Error starting host: %v", err) @@ -172,8 +176,10 @@ func TestStartHostErrMachineNotExist(t *testing.T) { mc.Name = h.Name + n.Name = h.Name + // Second call. This should pass without calling Create because the host exists already. - h, err = StartHost(api, mc) + h, err = StartHost(api, mc, n) if err != nil { t.Fatalf("Error starting host: %v", err) } @@ -205,7 +211,10 @@ func TestStartStoppedHost(t *testing.T) { provision.SetDetector(md) mc := defaultClusterConfig mc.Name = h.Name - h, err = StartHost(api, mc) + + n := config.Node{Name: h.Name} + + h, err = StartHost(api, mc, n) if err != nil { t.Fatal("Error starting host.") } @@ -233,7 +242,9 @@ func TestStartHost(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - h, err := StartHost(api, defaultClusterConfig) + n := config.Node{Name: viper.GetString("profile")} + + h, err := StartHost(api, defaultClusterConfig, n) if err != nil { t.Fatal("Error starting host.") } @@ -261,14 +272,16 @@ func TestStartHostConfig(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - config := config.ClusterConfig{ + cfg := config.ClusterConfig{ Driver: driver.Mock, DockerEnv: []string{"FOO=BAR"}, DockerOpt: []string{"param=value"}, Downloader: MockDownloader{}, } - h, err := StartHost(api, config) + n := config.Node{Name: viper.GetString("profile")} + + h, err := StartHost(api, cfg, n) if err != nil { t.Fatal("Error starting host.") } diff --git 
a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index a041e6a84f..8ea159a124 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -54,16 +54,16 @@ var ( ) // fixHost fixes up a previously configured VM so that it is ready to run Kubernetes -func fixHost(api libmachine.API, mc config.ClusterConfig) (*host.Host, error) { +func fixHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, error) { out.T(out.Waiting, "Reconfiguring existing host ...") start := time.Now() - glog.Infof("fixHost starting: %s", mc.Name) + glog.Infof("fixHost starting: %s", n.Name) defer func() { glog.Infof("fixHost completed within %s", time.Since(start)) }() - h, err := api.Load(mc.Name) + h, err := api.Load(n.Name) if err != nil { return h, errors.Wrap(err, "Error loading existing host. Please try running [minikube delete], then run [minikube start] again.") } diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go index c5cd2fa11f..4ae16dc005 100644 --- a/pkg/minikube/machine/start.go +++ b/pkg/minikube/machine/start.go @@ -61,9 +61,9 @@ var ( ) // StartHost starts a host VM. 
-func StartHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error) { +func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, error) { // Prevent machine-driver boot races, as well as our own certificate race - releaser, err := acquireMachinesLock(cfg.Name) + releaser, err := acquireMachinesLock(n.Name) if err != nil { return nil, errors.Wrap(err, "boot lock") } @@ -73,16 +73,16 @@ func StartHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error) releaser.Release() }() - exists, err := api.Exists(cfg.Name) + exists, err := api.Exists(n.Name) if err != nil { - return nil, errors.Wrapf(err, "exists: %s", cfg.Name) + return nil, errors.Wrapf(err, "exists: %s", n.Name) } if !exists { - glog.Infof("Provisioning new machine with config: %+v", cfg) + glog.Infof("Provisioning new machine with config: %+v", n) return createHost(api, cfg) } glog.Infoln("Skipping create...Using existing machine configuration") - return fixHost(api, cfg) + return fixHost(api, cfg, n) } func engineOptions(cfg config.ClusterConfig) *engine.Options { diff --git a/pkg/minikube/node/config.go b/pkg/minikube/node/config.go index 57ab7bf8fd..3448b29ce0 100644 --- a/pkg/minikube/node/config.go +++ b/pkg/minikube/node/config.go @@ -35,17 +35,6 @@ import ( "k8s.io/minikube/pkg/util/lock" ) -var ( - // DockerEnv contains the environment variables - DockerEnv []string - // DockerOpt contains the option parameters - DockerOpt []string - // ExtraOptions contains extra options (if any) - ExtraOptions config.ExtraOptionSlice - // AddonList contains the list of addons - AddonList []string -) - // configureRuntimes does what needs to happen to get a runtime going. 
func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig) cruntime.Manager { config := cruntime.Config{Type: viper.GetString(containerRuntime), Runner: runner, ImageRepository: k8s.ImageRepository, KubernetesVersion: k8s.KubernetesVersion} @@ -69,66 +58,14 @@ func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config func showVersionInfo(k8sVersion string, cr cruntime.Manager) { version, _ := cr.Version() out.T(cr.Style(), "Preparing Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}} ...", out.V{"k8sVersion": k8sVersion, "runtime": cr.Name(), "runtimeVersion": version}) - for _, v := range DockerOpt { + for _, v := range config.DockerOpt { out.T(out.Option, "opt {{.docker_option}}", out.V{"docker_option": v}) } - for _, v := range DockerEnv { + for _, v := range config.DockerEnv { out.T(out.Option, "env {{.docker_env}}", out.V{"docker_env": v}) } } -<<<<<<< HEAD -======= -// setupKubeAdm adds any requested files into the VM before Kubernetes is started -func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, node config.Node) bootstrapper.Bootstrapper { - bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper)) - if err != nil { - exit.WithError("Failed to get bootstrapper", err) - } - for _, eo := range ExtraOptions { - out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value}) - } - // Loads cached images, generates config files, download binaries - if err := bs.UpdateCluster(cfg); err != nil { - exit.WithError("Failed to update cluster", err) - } - if err := bs.SetupCerts(cfg.KubernetesConfig, node); err != nil { - exit.WithError("Failed to setup certs", err) - } - return bs -} - -func setupKubeconfig(h *host.Host, c *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { - addr, err := h.Driver.GetURL() - if err != nil { - 
exit.WithError("Failed to get driver URL", err) - } - if !driver.IsKIC(h.DriverName) { - addr = strings.Replace(addr, "tcp://", "https://", -1) - addr = strings.Replace(addr, ":2376", ":"+strconv.Itoa(n.Port), -1) - } - - if c.KubernetesConfig.APIServerName != constants.APIServerName { - addr = strings.Replace(addr, n.IP, c.KubernetesConfig.APIServerName, -1) - } - kcs := &kubeconfig.Settings{ - ClusterName: clusterName, - ClusterServerAddress: addr, - ClientCertificate: localpath.MakeMiniPath("client.crt"), - ClientKey: localpath.MakeMiniPath("client.key"), - CertificateAuthority: localpath.MakeMiniPath("ca.crt"), - KeepContext: viper.GetBool(keepContext), - EmbedCerts: viper.GetBool(embedCerts), - } - - kcs.SetPath(kubeconfig.PathFromEnv()) - if err := kubeconfig.Update(kcs); err != nil { - return kcs, err - } - return kcs, nil -} - ->>>>>>> c4e2236e2b2966cb05fa11b3bdc8cf1d060a270c // configureMounts configures any requested filesystem mounts func configureMounts() { if !viper.GetBool(createMount) { diff --git a/pkg/minikube/node/machine.go b/pkg/minikube/node/machine.go deleted file mode 100644 index 279e233c2a..0000000000 --- a/pkg/minikube/node/machine.go +++ /dev/null @@ -1,185 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package node - -import ( - "fmt" - "net" - "os" - "os/exec" - "strings" - "time" - - "github.com/docker/machine/libmachine" - "github.com/docker/machine/libmachine/host" - "github.com/golang/glog" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/bootstrapper/images" - "k8s.io/minikube/pkg/minikube/command" - "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" - "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/machine" - "k8s.io/minikube/pkg/minikube/out" - "k8s.io/minikube/pkg/minikube/proxy" - "k8s.io/minikube/pkg/util/retry" -) - -// StartMachine starts a VM -func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { - m, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Failed to get machine client", err) - } - host, preExists = startHost(m, *cfg) - runner, err = machine.CommandRunner(host) - if err != nil { - exit.WithError("Failed to get command runner", err) - } - - ip := validateNetwork(host, runner) - - // Bypass proxy for minikube's vm host ip - err = proxy.ExcludeIP(ip) - if err != nil { - out.ErrT(out.FailureType, "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip}) - } - // Save IP to configuration file for subsequent use - node.IP = ip - - if err := Save(cfg, node); err != nil { - exit.WithError("Failed to save config", err) - } - - return runner, preExists, m, host -} - -// startHost starts a new minikube host using a VM or None -func startHost(api libmachine.API, mc config.ClusterConfig) (*host.Host, bool) { - exists, err := api.Exists(mc.Name) - if err != nil { - exit.WithError("Failed to check if machine exists", err) - } - - host, err := machine.StartHost(api, mc) - if err != nil { - exit.WithError("Unable to start VM. 
Please investigate and run 'minikube delete' if possible", err) - } - return host, exists -} - -// validateNetwork tries to catch network problems as soon as possible -func validateNetwork(h *host.Host, r command.Runner) string { - ip, err := h.Driver.GetIP() - if err != nil { - exit.WithError("Unable to get VM IP address", err) - } - - optSeen := false - warnedOnce := false - for _, k := range proxy.EnvVars { - if v := os.Getenv(k); v != "" { - if !optSeen { - out.T(out.Internet, "Found network options:") - optSeen = true - } - out.T(out.Option, "{{.key}}={{.value}}", out.V{"key": k, "value": v}) - ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY - k = strings.ToUpper(k) // for http_proxy & https_proxy - if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce { - out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"}) - warnedOnce = true - } - } - } - - if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) { - trySSH(h, ip) - } - - tryLookup(r) - tryRegistry(r) - return ip -} - -func trySSH(h *host.Host, ip string) { - if viper.GetBool("force") { - return - } - - sshAddr := net.JoinHostPort(ip, "22") - - dial := func() (err error) { - d := net.Dialer{Timeout: 3 * time.Second} - conn, err := d.Dial("tcp", sshAddr) - if err != nil { - out.WarningT("Unable to verify SSH connectivity: {{.error}}. 
Will retry...", out.V{"error": err}) - return err - } - _ = conn.Close() - return nil - } - - if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil { - exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}} - - This is likely due to one of two reasons: - - - VPN or firewall interference - - {{.hypervisor}} network configuration issue - - Suggested workarounds: - - - Disable your local VPN or firewall software - - Configure your local VPN or firewall to allow access to {{.ip}} - - Restart or reinstall {{.hypervisor}} - - Use an alternative --vm-driver - - Use --force to override this connectivity check - `, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip}) - } -} - -func tryLookup(r command.Runner) { - // DNS check - if rr, err := r.RunCmd(exec.Command("nslookup", "kubernetes.io", "-type=ns")); err != nil { - glog.Infof("%s failed: %v which might be okay will retry nslookup without query type", rr.Args, err) - // will try with without query type for ISOs with different busybox versions. - if _, err = r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil { - glog.Warningf("nslookup failed: %v", err) - out.WarningT("Node may be unable to resolve external DNS records") - } - } -} -func tryRegistry(r command.Runner) { - // Try an HTTPS connection to the image repository - proxy := os.Getenv("HTTPS_PROXY") - opts := []string{"-sS"} - if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") { - opts = append([]string{"-x", proxy}, opts...) 
- } - - repo := viper.GetString(imageRepository) - if repo == "" { - repo = images.DefaultKubernetesRepo - } - - opts = append(opts, fmt.Sprintf("https://%s/", repo)) - if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil { - glog.Warningf("%s failed: %v", rr.Args, err) - out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo}) - } -} diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 4e2f75b94b..ac247b0ce3 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -25,7 +25,6 @@ import ( ) const ( - imageRepository = "image-repository" cacheImages = "cache-images" waitUntilHealthy = "wait" cacheImageConfigKey = "cache" @@ -38,7 +37,7 @@ const ( ) // Add adds a new node config to an existing cluster. -func Add(cc *config.ClusterConfig, name string, controlPlane bool, worker bool, k8sVersion string, profileName string) (*config.Node, error) { +func Add(cc *config.ClusterConfig, name string, controlPlane bool, worker bool, k8sVersion string, profileName string) error { n := config.Node{ Name: name, Worker: true, @@ -62,11 +61,12 @@ func Add(cc *config.ClusterConfig, name string, controlPlane bool, worker bool, cc.Nodes = append(cc.Nodes, n) err := config.SaveProfile(profileName, cc) if err != nil { - return nil, err + return err } - _, err = Start(*cc, n, false, nil) - return &n, err + err = Start(*cc, n, nil) + + return err } // Delete stops and deletes the given node from the given cluster diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 36f1155f2f..d43d9f455d 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -21,6 +21,7 @@ import ( "github.com/spf13/viper" "golang.org/x/sync/errgroup" + cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/addons" "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" @@ -32,25 +33,25 @@ import ( ) // Start 
spins up a guest and starts the kubernetes node. -func Start(mc config.ClusterConfig, n config.Node, preExists bool, existingAddons map[string]bool) error { +func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) error { // Now that the ISO is downloaded, pull images in the background while the VM boots. var cacheGroup errgroup.Group - beginCacheRequiredImages(&cacheGroup, mc.KubernetesConfig.ImageRepository, n.KubernetesVersion) + beginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion) - // Abstraction leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot. - // Hence, saveConfig must be called before startHost, and again afterwards when we know the IP. - if err := config.SaveProfile(viper.GetString(config.MachineProfile), &mc); err != nil { - exit.WithError("Failed to save config", err) + runner, preExists, mAPI, _ := cluster.StartMachine(&cc, &n) + defer mAPI.Close() + + bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), n.Name) + if err != nil { + exit.WithError("Failed to get bootstrapper", err) } - bs, err := cluster.Bootstrapper() - - k8sVersion := mc.KubernetesConfig.KubernetesVersion - driverName := mc.Driver + k8sVersion := cc.KubernetesConfig.KubernetesVersion + driverName := cc.Driver // exits here in case of --download-only option. handleDownloadOnly(&cacheGroup, k8sVersion) // configure the runtime (docker, containerd, crio) - cr := configureRuntimes(mRunner, driverName, mc.KubernetesConfig) + cr := configureRuntimes(runner, driverName, cc.KubernetesConfig) showVersionInfo(k8sVersion, cr) waitCacheRequiredImages(&cacheGroup) @@ -58,7 +59,11 @@ func Start(mc config.ClusterConfig, n config.Node, preExists bool, existingAddon // enable addons, both old and new! 
if existingAddons != nil { - addons.Start(viper.GetString(config.MachineProfile), existingAddons, AddonList) + addons.Start(viper.GetString(config.MachineProfile), existingAddons, config.AddonList) + } + + if err := bs.UpdateNode(cc, n, cr); err != nil { + exit.WithError("Failed to update node", err) } if err := CacheAndLoadImagesInConfig(); err != nil { @@ -66,18 +71,30 @@ func Start(mc config.ClusterConfig, n config.Node, preExists bool, existingAddon } // special ops for none , like change minikube directory. - if driverName == driver.None { + // multinode super doesn't work on the none driver + if driverName == driver.None && len(cc.Nodes) == 1 { prepareNone() } // Skip pre-existing, because we already waited for health if viper.GetBool(waitUntilHealthy) && !preExists { - if err := bs.WaitForCluster(mc, viper.GetDuration(waitTimeout)); err != nil { + if err := bs.WaitForCluster(cc, viper.GetDuration(waitTimeout)); err != nil { exit.WithError("Wait failed", err) } } - return nil + bs.SetupCerts(cc.KubernetesConfig, n) + + cp, err := config.PrimaryControlPlane(cc) + if err != nil { + exit.WithError("Getting primary control plane", err) + } + cpBs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cp.Name) + if err != nil { + exit.WithError("Getting bootstrapper", err) + } + joinCmd, err := cpBs.GenerateToken(cc.KubernetesConfig) + return bs.JoinCluster(cc, n, joinCmd) } // prepareNone prepares the user and host for the joy of the "none" driver From 9a3ecab61a67ef6fd179125c47d38dcb84573b0d Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 24 Feb 2020 15:09:04 -0800 Subject: [PATCH 021/668] fixed more stuff --- cmd/minikube/cmd/node.go | 7 ++- cmd/minikube/cmd/node_add.go | 4 ++ cmd/minikube/cmd/node_delete.go | 2 +- pkg/drivers/hyperkit/driver.go | 4 +- pkg/minikube/config/config.go | 8 ++++ pkg/minikube/config/node.go | 47 ------------------- pkg/minikube/machine/cache_images.go | 44 ++++++++--------- 
pkg/minikube/machine/cluster_test.go | 43 ++++++++++------- pkg/minikube/machine/fix.go | 2 +- pkg/minikube/machine/start.go | 12 ++--- pkg/minikube/node/node.go | 21 +-------- pkg/minikube/node/start.go | 5 ++ pkg/minikube/registry/drvs/docker/docker.go | 6 +-- .../registry/drvs/hyperkit/hyperkit.go | 27 ++++++----- .../registry/drvs/parallels/parallels.go | 14 +++--- pkg/minikube/registry/drvs/podman/podman.go | 6 +-- .../registry/drvs/virtualbox/virtualbox.go | 4 +- pkg/minikube/registry/drvs/vmware/vmware.go | 4 +- .../drvs/vmwarefusion/vmwarefusion.go | 14 +++--- pkg/minikube/registry/registry.go | 2 +- 20 files changed, 120 insertions(+), 156 deletions(-) delete mode 100644 pkg/minikube/config/node.go diff --git a/cmd/minikube/cmd/node.go b/cmd/minikube/cmd/node.go index 7b70780f74..39dbac6c7c 100644 --- a/cmd/minikube/cmd/node.go +++ b/cmd/minikube/cmd/node.go @@ -23,10 +23,9 @@ import ( // nodeCmd represents the set of node subcommands var nodeCmd = &cobra.Command{ - Use: "node", - Short: "Node operations", - Long: "Operations on nodes", - Hidden: true, // This won't be fully functional and thus should not be documented yet + Use: "node", + Short: "Node operations", + Long: "Operations on nodes", Run: func(cmd *cobra.Command, args []string) { exit.UsageT("Usage: minikube node [add|start|stop|delete]") }, diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index cf3a1c626e..fe5557258d 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -47,6 +47,10 @@ var nodeAddCmd = &cobra.Command{ if nodeName == "" { name = profile + strconv.Itoa(len(mc.Nodes)+1) } + _, _, err = node.Retrieve(mc, name) + if err == nil { + exit.WithCodeT(100, "{{.nodeName}} already exists in cluster {{.cluster}}. 
Choose a different name.", out.V{"nodeName": name, "cluster": mc.Name}) + } out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile}) err = node.Add(mc, name, cp, worker, "", profile) diff --git a/cmd/minikube/cmd/node_delete.go b/cmd/minikube/cmd/node_delete.go index 92e5e5755d..33d6ca6660 100644 --- a/cmd/minikube/cmd/node_delete.go +++ b/cmd/minikube/cmd/node_delete.go @@ -46,7 +46,7 @@ var nodeDeleteCmd = &cobra.Command{ err = node.Delete(*cc, name) if err != nil { - out.FatalT("Failed to delete node {{.name}}", out.V{"name": name}) + exit.WithError("deleting node", err) } out.T(out.Deleted, "Node {{.name}} was successfully deleted.", out.V{"name": name}) diff --git a/pkg/drivers/hyperkit/driver.go b/pkg/drivers/hyperkit/driver.go index bf775240e6..9659237408 100644 --- a/pkg/drivers/hyperkit/driver.go +++ b/pkg/drivers/hyperkit/driver.go @@ -65,6 +65,7 @@ type Driver struct { UUID string VpnKitSock string VSockPorts []string + ClusterName string } // NewDriver creates a new driver for a host @@ -199,7 +200,7 @@ func (d *Driver) Restart() error { } func (d *Driver) createHost() (*hyperkit.HyperKit, error) { - stateDir := filepath.Join(d.StorePath, "machines", d.MachineName) + stateDir := filepath.Join(d.StorePath, "machines", d.ClusterName, d.MachineName) h, err := hyperkit.New("", d.VpnKitSock, stateDir) if err != nil { return nil, errors.Wrap(err, "new-ing Hyperkit") @@ -519,6 +520,7 @@ func (d *Driver) sendSignal(s os.Signal) error { func (d *Driver) getPid() int { pidPath := d.ResolveStorePath(machineFileName) + log.Debugf("PIDPATH=%s", pidPath) f, err := os.Open(pidPath) if err != nil { log.Warnf("Error reading pid file: %v", err) diff --git a/pkg/minikube/config/config.go b/pkg/minikube/config/config.go index 351d6a177d..bf7e0bdd28 100644 --- a/pkg/minikube/config/config.go +++ b/pkg/minikube/config/config.go @@ -52,6 +52,14 @@ const ( var ( // ErrKeyNotFound is the error returned when a key doesn't exist 
in the config file ErrKeyNotFound = errors.New("specified key could not be found in config") + // DockerEnv contains the environment variables + DockerEnv []string + // DockerOpt contains the option parameters + DockerOpt []string + // ExtraOptions contains extra options (if any) + ExtraOptions ExtraOptionSlice + // AddonList contains the list of addons + AddonList []string ) // ErrNotExist is the error returned when a config does not exist diff --git a/pkg/minikube/config/node.go b/pkg/minikube/config/node.go deleted file mode 100644 index 572a182553..0000000000 --- a/pkg/minikube/config/node.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -var ( - // DockerEnv contains the environment variables - DockerEnv []string - // DockerOpt contains the option parameters - DockerOpt []string - // ExtraOptions contains extra options (if any) - ExtraOptions ExtraOptionSlice - // AddonList contains the list of addons - AddonList []string -) - -// AddNode adds a new node config to an existing cluster. 
-func AddNode(cc *ClusterConfig, name string, controlPlane bool, k8sVersion string, profileName string) error { - node := Node{ - Name: name, - Worker: true, - } - - if controlPlane { - node.ControlPlane = true - } - - if k8sVersion != "" { - node.KubernetesVersion = k8sVersion - } - - cc.Nodes = append(cc.Nodes, node) - return SaveProfile(profileName, cc) -} diff --git a/pkg/minikube/machine/cache_images.go b/pkg/minikube/machine/cache_images.go index 301c3b02fd..40e35a2c69 100644 --- a/pkg/minikube/machine/cache_images.go +++ b/pkg/minikube/machine/cache_images.go @@ -137,29 +137,31 @@ func CacheAndLoadImages(images []string) error { return errors.Wrap(err, "list profiles") } for _, p := range profiles { // loading images to all running profiles - pName := p.Name // capture the loop variable - status, err := GetHostStatus(api, pName) - if err != nil { - glog.Warningf("skipping loading cache for profile %s", pName) - glog.Errorf("error getting status for %s: %v", pName, err) - continue // try next machine - } - if status == state.Running.String() { // the not running hosts will load on next start - h, err := api.Load(pName) + for _, n := range p.Config.Nodes { + pName := n.Name // capture the loop variable + status, err := GetHostStatus(api, pName) if err != nil { - return err + glog.Warningf("skipping loading cache for profile %s", pName) + glog.Errorf("error getting status for %s: %v", pName, err) + continue // try next machine } - cr, err := CommandRunner(h) - if err != nil { - return err - } - c, err := config.Load(pName) - if err != nil { - return err - } - err = LoadImages(c, cr, images, constants.ImageCacheDir) - if err != nil { - glog.Warningf("Failed to load cached images for profile %s. make sure the profile is running. 
%v", pName, err) + if status == state.Running.String() { // the not running hosts will load on next start + h, err := api.Load(pName) + if err != nil { + return err + } + cr, err := CommandRunner(h) + if err != nil { + return err + } + c, err := config.Load(pName) + if err != nil { + return err + } + err = LoadImages(c, cr, images, constants.ImageCacheDir) + if err != nil { + glog.Warningf("Failed to load cached images for profile %s. make sure the profile is running. %v", pName, err) + } } } } diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index ad326611c0..2053320a75 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -41,7 +41,7 @@ type MockDownloader struct{} func (d MockDownloader) GetISOFileURI(isoURL string) string { return "" } func (d MockDownloader) CacheMinikubeISOFromURL(isoURL string) error { return nil } -func createMockDriverHost(c config.ClusterConfig) (interface{}, error) { +func createMockDriverHost(c config.ClusterConfig, n config.Node) (interface{}, error) { return nil, nil } @@ -67,28 +67,35 @@ var defaultClusterConfig = config.ClusterConfig{ DockerEnv: []string{"MOCK_MAKE_IT_PROVISION=true"}, } +var defaultNodeConfig = config.Node{ + Name: viper.GetString("profile"), +} + func TestCreateHost(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - exists, _ := api.Exists(viper.GetString("profile")) + profile := viper.GetString("profile") + exists, _ := api.Exists(profile) if exists { t.Fatal("Machine already exists.") } - _, err := createHost(api, defaultClusterConfig) + n := config.Node{Name: profile} + + _, err := createHost(api, defaultClusterConfig, n) if err != nil { t.Fatalf("Error creating host: %v", err) } - exists, err = api.Exists(viper.GetString("profile")) + exists, err = api.Exists(profile) if err != nil { - t.Fatalf("exists failed for %q: %v", viper.GetString("profile"), err) + t.Fatalf("exists failed for %q: %v", profile, err) } if 
!exists { - t.Fatalf("%q does not exist, but should.", viper.GetString("profile")) + t.Fatalf("%q does not exist, but should.", profile) } - h, err := api.Load(viper.GetString("profile")) + h, err := api.Load(profile) if err != nil { t.Fatalf("Error loading machine: %v", err) } @@ -113,8 +120,9 @@ func TestCreateHost(t *testing.T) { func TestStartHostExists(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) + // Create an initial host. - ih, err := createHost(api, defaultClusterConfig) + ih, err := createHost(api, defaultClusterConfig, defaultNodeConfig) if err != nil { t.Fatalf("Error creating host: %v", err) } @@ -131,9 +139,8 @@ func TestStartHostExists(t *testing.T) { mc := defaultClusterConfig mc.Name = ih.Name - n := config.Node{Name: ih.Name} // This should pass without calling Create because the host exists already. - h, err := StartHost(api, mc, n) + h, err := StartHost(api, mc, defaultNodeConfig) if err != nil { t.Fatalf("Error starting host: %v", err) } @@ -153,7 +160,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) { api := tests.NewMockAPI(t) // Create an incomplete host with machine does not exist error(i.e. User Interrupt Cancel) api.NotExistError = true - h, err := createHost(api, defaultClusterConfig) + h, err := createHost(api, defaultClusterConfig, defaultNodeConfig) if err != nil { t.Fatalf("Error creating host: %v", err) } @@ -199,7 +206,7 @@ func TestStartStoppedHost(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) // Create an initial host. 
- h, err := createHost(api, defaultClusterConfig) + h, err := createHost(api, defaultClusterConfig, defaultNodeConfig) if err != nil { t.Fatalf("Error creating host: %v", err) } @@ -311,7 +318,7 @@ func TestStopHostError(t *testing.T) { func TestStopHost(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - h, err := createHost(api, defaultClusterConfig) + h, err := createHost(api, defaultClusterConfig, defaultNodeConfig) if err != nil { t.Errorf("createHost failed: %v", err) } @@ -327,7 +334,7 @@ func TestStopHost(t *testing.T) { func TestDeleteHost(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - if _, err := createHost(api, defaultClusterConfig); err != nil { + if _, err := createHost(api, defaultClusterConfig, defaultNodeConfig); err != nil { t.Errorf("createHost failed: %v", err) } @@ -339,7 +346,7 @@ func TestDeleteHost(t *testing.T) { func TestDeleteHostErrorDeletingVM(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - h, err := createHost(api, defaultClusterConfig) + h, err := createHost(api, defaultClusterConfig, defaultNodeConfig) if err != nil { t.Errorf("createHost failed: %v", err) } @@ -356,7 +363,7 @@ func TestDeleteHostErrorDeletingFiles(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) api.RemoveError = true - if _, err := createHost(api, defaultClusterConfig); err != nil { + if _, err := createHost(api, defaultClusterConfig, defaultNodeConfig); err != nil { t.Errorf("createHost failed: %v", err) } @@ -370,7 +377,7 @@ func TestDeleteHostErrMachineNotExist(t *testing.T) { api := tests.NewMockAPI(t) // Create an incomplete host with machine does not exist error(i.e. 
User Interrupt Cancel) api.NotExistError = true - _, err := createHost(api, defaultClusterConfig) + _, err := createHost(api, defaultClusterConfig, defaultNodeConfig) if err != nil { t.Errorf("createHost failed: %v", err) } @@ -396,7 +403,7 @@ func TestGetHostStatus(t *testing.T) { checkState(state.None.String()) - if _, err := createHost(api, defaultClusterConfig); err != nil { + if _, err := createHost(api, defaultClusterConfig, defaultNodeConfig); err != nil { t.Errorf("createHost failed: %v", err) } diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 8ea159a124..3b465e5025 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -88,7 +88,7 @@ func fixHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host. } // recreate virtual machine out.T(out.Meh, "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.", out.V{"name": mc.Name}) - h, err = createHost(api, mc) + h, err = createHost(api, mc, n) if err != nil { return nil, errors.Wrap(err, "Error recreating VM") } diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go index 4ae16dc005..1d34f28070 100644 --- a/pkg/minikube/machine/start.go +++ b/pkg/minikube/machine/start.go @@ -69,7 +69,7 @@ func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*ho } start := time.Now() defer func() { - glog.Infof("releasing machines lock for %q, held for %s", cfg.Name, time.Since(start)) + glog.Infof("releasing machines lock for %q, held for %s", n.Name, time.Since(start)) releaser.Release() }() @@ -78,8 +78,8 @@ func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*ho return nil, errors.Wrapf(err, "exists: %s", n.Name) } if !exists { - glog.Infof("Provisioning new machine with config: %+v", n) - return createHost(api, cfg) + glog.Infof("Provisioning new machine with config: %+v %+v", cfg, n) + return createHost(api, cfg, n) } glog.Infoln("Skipping create...Using existing machine 
configuration") return fixHost(api, cfg, n) @@ -96,8 +96,8 @@ func engineOptions(cfg config.ClusterConfig) *engine.Options { return &o } -func createHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error) { - glog.Infof("createHost starting for %q (driver=%q)", cfg.Name, cfg.Driver) +func createHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, error) { + glog.Infof("createHost starting for %q (driver=%q)", n.Name, cfg.Driver) start := time.Now() defer func() { glog.Infof("createHost completed in %s", time.Since(start)) @@ -114,7 +114,7 @@ func createHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error if def.Empty() { return nil, fmt.Errorf("unsupported/missing driver: %s", cfg.Driver) } - dd, err := def.Config(cfg) + dd, err := def.Config(cfg, n) if err != nil { return nil, errors.Wrap(err, "config") } diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index ac247b0ce3..af9856d077 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -19,6 +19,7 @@ package node import ( "errors" + "github.com/golang/glog" "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/machine" @@ -76,10 +77,9 @@ func Delete(cc config.ClusterConfig, name string) error { return err } - /*err = Stop(cc, nd) if err != nil { glog.Warningf("Failed to stop node %s. 
Will still try to delete.", name) - }*/ + } api, err := machine.NewAPIClient() if err != nil { @@ -105,20 +105,3 @@ func Retrieve(cc *config.ClusterConfig, name string) (*config.Node, int, error) return nil, -1, errors.New("Could not find node " + name) } - -// Save saves a node to a cluster -func Save(cfg *config.ClusterConfig, node *config.Node) error { - update := false - for i, n := range cfg.Nodes { - if n.Name == node.Name { - cfg.Nodes[i] = *node - update = true - break - } - } - - if !update { - cfg.Nodes = append(cfg.Nodes, *node) - } - return config.SaveProfile(viper.GetString(config.MachineProfile), cfg) -} diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index d43d9f455d..22dd8ac868 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -38,6 +38,11 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo var cacheGroup errgroup.Group beginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion) + // Why do we need this? 
+ if cc.Downloader == nil { + cc.Downloader = util.DefaultDownloader{} + } + runner, preExists, mAPI, _ := cluster.StartMachine(&cc, &n) defer mAPI.Close() diff --git a/pkg/minikube/registry/drvs/docker/docker.go b/pkg/minikube/registry/drvs/docker/docker.go index 0b66dfdecb..d5278a8f92 100644 --- a/pkg/minikube/registry/drvs/docker/docker.go +++ b/pkg/minikube/registry/drvs/docker/docker.go @@ -43,15 +43,15 @@ func init() { } } -func configure(mc config.ClusterConfig) (interface{}, error) { +func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { return kic.NewDriver(kic.Config{ - MachineName: mc.Name, + MachineName: n.Name, StorePath: localpath.MiniPath(), ImageDigest: kic.BaseImage, CPU: mc.CPUs, Memory: mc.Memory, OCIBinary: oci.Docker, - APIServerPort: mc.Nodes[0].Port, + APIServerPort: n.Port, }), nil } diff --git a/pkg/minikube/registry/drvs/hyperkit/hyperkit.go b/pkg/minikube/registry/drvs/hyperkit/hyperkit.go index 47a3db9091..b9e4b4f09f 100644 --- a/pkg/minikube/registry/drvs/hyperkit/hyperkit.go +++ b/pkg/minikube/registry/drvs/hyperkit/hyperkit.go @@ -31,7 +31,7 @@ import ( "github.com/pborman/uuid" "k8s.io/minikube/pkg/drivers/hyperkit" - cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/registry" @@ -57,28 +57,29 @@ func init() { } } -func configure(config cfg.ClusterConfig) (interface{}, error) { - u := config.UUID +func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { + u := cfg.UUID if u == "" { u = uuid.NewUUID().String() } return &hyperkit.Driver{ BaseDriver: &drivers.BaseDriver{ - MachineName: config.Name, + MachineName: cfg.Name, StorePath: localpath.MiniPath(), SSHUser: "docker", }, - Boot2DockerURL: config.Downloader.GetISOFileURI(config.MinikubeISO), - DiskSize: config.DiskSize, - Memory: config.Memory, - CPU: config.CPUs, - NFSShares: config.NFSShare, - 
NFSSharesRoot: config.NFSSharesRoot, + ClusterName: cfg.Name, + Boot2DockerURL: cfg.Downloader.GetISOFileURI(cfg.MinikubeISO), + DiskSize: cfg.DiskSize, + Memory: cfg.Memory, + CPU: cfg.CPUs, + NFSShares: cfg.NFSShare, + NFSSharesRoot: cfg.NFSSharesRoot, UUID: u, - VpnKitSock: config.HyperkitVpnKitSock, - VSockPorts: config.HyperkitVSockPorts, - Cmdline: "loglevel=3 console=ttyS0 console=tty0 noembed nomodeset norestore waitusb=10 systemd.legacy_systemd_cgroup_controller=yes random.trust_cpu=on hw_rng_model=virtio base host=" + config.Name, + VpnKitSock: cfg.HyperkitVpnKitSock, + VSockPorts: cfg.HyperkitVSockPorts, + Cmdline: "loglevel=3 console=ttyS0 console=tty0 noembed nomodeset norestore waitusb=10 systemd.legacy_systemd_cgroup_controller=yes random.trust_cpu=on hw_rng_model=virtio base host=" + n.Name, }, nil } diff --git a/pkg/minikube/registry/drvs/parallels/parallels.go b/pkg/minikube/registry/drvs/parallels/parallels.go index 79d0e9085e..29095a6226 100644 --- a/pkg/minikube/registry/drvs/parallels/parallels.go +++ b/pkg/minikube/registry/drvs/parallels/parallels.go @@ -24,7 +24,7 @@ import ( parallels "github.com/Parallels/docker-machine-parallels" "github.com/docker/machine/libmachine/drivers" - cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/registry" @@ -44,12 +44,12 @@ func init() { } -func configure(config cfg.ClusterConfig) (interface{}, error) { - d := parallels.NewDriver(config.Name, localpath.MiniPath()).(*parallels.Driver) - d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) - d.Memory = config.Memory - d.CPU = config.CPUs - d.DiskSize = config.DiskSize +func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { + d := parallels.NewDriver(n.Name, localpath.MiniPath()).(*parallels.Driver) + d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO) + d.Memory = 
cfg.Memory + d.CPU = cfg.CPUs + d.DiskSize = cfg.DiskSize return d, nil } diff --git a/pkg/minikube/registry/drvs/podman/podman.go b/pkg/minikube/registry/drvs/podman/podman.go index ec5d6013ac..eab3200b83 100644 --- a/pkg/minikube/registry/drvs/podman/podman.go +++ b/pkg/minikube/registry/drvs/podman/podman.go @@ -49,15 +49,15 @@ func init() { } } -func configure(mc config.ClusterConfig) (interface{}, error) { +func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { return kic.NewDriver(kic.Config{ - MachineName: mc.Name, + MachineName: n.Name, StorePath: localpath.MiniPath(), ImageDigest: strings.Split(kic.BaseImage, "@")[0], // for podman does not support docker images references with both a tag and digest. CPU: mc.CPUs, Memory: mc.Memory, OCIBinary: oci.Podman, - APIServerPort: mc.Nodes[0].Port, + APIServerPort: n.Port, }), nil } diff --git a/pkg/minikube/registry/drvs/virtualbox/virtualbox.go b/pkg/minikube/registry/drvs/virtualbox/virtualbox.go index c3888c3758..7dd13af948 100644 --- a/pkg/minikube/registry/drvs/virtualbox/virtualbox.go +++ b/pkg/minikube/registry/drvs/virtualbox/virtualbox.go @@ -49,8 +49,8 @@ func init() { } } -func configure(mc config.ClusterConfig) (interface{}, error) { - d := virtualbox.NewDriver(mc.Name, localpath.MiniPath()) +func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { + d := virtualbox.NewDriver(n.Name, localpath.MiniPath()) d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO) d.Memory = mc.Memory d.CPU = mc.CPUs diff --git a/pkg/minikube/registry/drvs/vmware/vmware.go b/pkg/minikube/registry/drvs/vmware/vmware.go index 0333dce541..b6a90929c9 100644 --- a/pkg/minikube/registry/drvs/vmware/vmware.go +++ b/pkg/minikube/registry/drvs/vmware/vmware.go @@ -39,8 +39,8 @@ func init() { } } -func configure(mc config.ClusterConfig) (interface{}, error) { - d := vmwcfg.NewConfig(mc.Name, localpath.MiniPath()) +func configure(mc config.ClusterConfig, n config.Node) (interface{}, 
error) { + d := vmwcfg.NewConfig(n.Name, localpath.MiniPath()) d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO) d.Memory = mc.Memory d.CPU = mc.CPUs diff --git a/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go b/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go index 524e50f88c..adc50d70e8 100644 --- a/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go +++ b/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go @@ -26,7 +26,7 @@ import ( "github.com/docker/machine/libmachine/drivers" "github.com/pkg/errors" - cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/registry" @@ -44,12 +44,12 @@ func init() { } } -func configure(config cfg.ClusterConfig) (interface{}, error) { - d := vmwarefusion.NewDriver(config.Name, localpath.MiniPath()).(*vmwarefusion.Driver) - d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) - d.Memory = config.Memory - d.CPU = config.CPUs - d.DiskSize = config.DiskSize +func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { + d := vmwarefusion.NewDriver(n.Name, localpath.MiniPath()).(*vmwarefusion.Driver) + d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO) + d.Memory = cfg.Memory + d.CPU = cfg.CPUs + d.DiskSize = cfg.DiskSize // TODO(philips): push these defaults upstream to fixup this driver d.SSHPort = 22 diff --git a/pkg/minikube/registry/registry.go b/pkg/minikube/registry/registry.go index e5fb98ce51..794dffc18a 100644 --- a/pkg/minikube/registry/registry.go +++ b/pkg/minikube/registry/registry.go @@ -60,7 +60,7 @@ type Registry interface { } // Configurator emits a struct to be marshalled into JSON for Machine Driver -type Configurator func(config.ClusterConfig) (interface{}, error) +type Configurator func(config.ClusterConfig, config.Node) (interface{}, error) // Loader is a function that loads a byte stream 
and creates a driver. type Loader func() drivers.Driver From be9c5f476149bc8904924c24043568180b0db70c Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 24 Feb 2020 15:58:35 -0800 Subject: [PATCH 022/668] pass in the node object into add --- cmd/minikube/cmd/node_add.go | 10 +++++++++- cmd/minikube/cmd/start.go | 8 +++++++- pkg/minikube/node/node.go | 24 ++---------------------- 3 files changed, 18 insertions(+), 24 deletions(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index fe5557258d..8cb836edea 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -53,7 +53,15 @@ var nodeAddCmd = &cobra.Command{ } out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile}) - err = node.Add(mc, name, cp, worker, "", profile) + // TODO: Deal with parameters better. Ideally we should be able to acceot any node-specific minikube start params here. + n := config.Node{ + Name: name, + Worker: worker, + ControlPlane: cp, + KubernetesVersion: mc.KubernetesConfig.KubernetesVersion, + } + + err = node.Add(mc, n) if err != nil { exit.WithError("Error adding node to cluster", err) } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index fbb30b7adb..0aa5a8691a 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -357,7 +357,13 @@ func runStart(cmd *cobra.Command, args []string) { if numNodes > 1 { for i := 0; i < numNodes-1; i++ { nodeName := fmt.Sprintf("%s%d", n.Name, i+1) - node.Add(&mc, nodeName, false, true, "", "") + n := config.Node{ + Name: nodeName, + Worker: true, + ControlPlane: false, + KubernetesVersion: mc.KubernetesConfig.KubernetesVersion, + } + node.Add(&mc, n) } } } diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index af9856d077..86df75e021 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -38,29 +38,9 @@ const ( ) // Add adds a new node config to an existing cluster. 
-func Add(cc *config.ClusterConfig, name string, controlPlane bool, worker bool, k8sVersion string, profileName string) error { - n := config.Node{ - Name: name, - Worker: true, - } - - // TODO: Deal with parameters better. Ideally we should be able to acceot any node-specific minikube start params here. - if controlPlane { - n.ControlPlane = true - } - - if !worker { - n.Worker = false - } - - if k8sVersion != "" { - n.KubernetesVersion = k8sVersion - } else { - n.KubernetesVersion = cc.KubernetesConfig.KubernetesVersion - } - +func Add(cc *config.ClusterConfig, n config.Node) error { cc.Nodes = append(cc.Nodes, n) - err := config.SaveProfile(profileName, cc) + err := config.SaveProfile(cc.Name, cc) if err != nil { return err } From f0ca34b0b549f267e60622cdb505a00e265ef615 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 24 Feb 2020 16:17:29 -0800 Subject: [PATCH 023/668] fix unit tests --- pkg/minikube/machine/cluster_test.go | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index 2053320a75..449acd729e 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -75,27 +75,26 @@ func TestCreateHost(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - profile := viper.GetString("profile") - exists, _ := api.Exists(profile) + exists, _ := api.Exists(viper.GetString("profile")) if exists { t.Fatal("Machine already exists.") } - n := config.Node{Name: profile} + n := config.Node{Name: viper.GetString("profile")} _, err := createHost(api, defaultClusterConfig, n) if err != nil { t.Fatalf("Error creating host: %v", err) } - exists, err = api.Exists(profile) + exists, err = api.Exists(viper.GetString("profile")) if err != nil { - t.Fatalf("exists failed for %q: %v", profile, err) + t.Fatalf("exists failed for %q: %v", viper.GetString("profile"), err) } if !exists { - t.Fatalf("%q does not exist, 
but should.", profile) + t.Fatalf("%q does not exist, but should.", viper.GetString("profile")) } - h, err := api.Load(profile) + h, err := api.Load(viper.GetString("profile")) if err != nil { t.Fatalf("Error loading machine: %v", err) } @@ -121,6 +120,8 @@ func TestStartHostExists(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) + n := defaultNodeConfig + // Create an initial host. ih, err := createHost(api, defaultClusterConfig, defaultNodeConfig) if err != nil { @@ -139,8 +140,10 @@ func TestStartHostExists(t *testing.T) { mc := defaultClusterConfig mc.Name = ih.Name + n.Name = ih.Name + // This should pass without calling Create because the host exists already. - h, err := StartHost(api, mc, defaultNodeConfig) + h, err := StartHost(api, mc, n) if err != nil { t.Fatalf("Error starting host: %v", err) } @@ -294,13 +297,13 @@ func TestStartHostConfig(t *testing.T) { } for i := range h.HostOptions.EngineOptions.Env { - if h.HostOptions.EngineOptions.Env[i] != config.DockerEnv[i] { + if h.HostOptions.EngineOptions.Env[i] != cfg.DockerEnv[i] { t.Fatal("Docker env variables were not set!") } } for i := range h.HostOptions.EngineOptions.ArbitraryFlags { - if h.HostOptions.EngineOptions.ArbitraryFlags[i] != config.DockerOpt[i] { + if h.HostOptions.EngineOptions.ArbitraryFlags[i] != cfg.DockerOpt[i] { t.Fatal("Docker flags were not set!") } } From b2ba874d560b1b2b613ffe7d8d837c7890918630 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 24 Feb 2020 16:27:05 -0800 Subject: [PATCH 024/668] SaveNode is simpler yeah --- pkg/minikube/cluster/setup.go | 2 +- pkg/minikube/config/profile.go | 4 ++-- pkg/minikube/node/node.go | 5 ++--- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index d786312820..dc1f46a0cd 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -165,7 +165,7 @@ func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner 
command. } node.IP = ip - config.SaveNodeToProfile(cfg, node) + config.SaveNode(cfg, node) return runner, preExists, m, host } diff --git a/pkg/minikube/config/profile.go b/pkg/minikube/config/profile.go index 0acfe1a8b4..78dee9a57e 100644 --- a/pkg/minikube/config/profile.go +++ b/pkg/minikube/config/profile.go @@ -91,8 +91,8 @@ func CreateEmptyProfile(name string, miniHome ...string) error { return SaveProfile(name, cfg, miniHome...) } -// SaveNodeToProfile saves a node to a cluster -func SaveNodeToProfile(cfg *ClusterConfig, node *Node) error { +// SaveNode saves a node to a cluster +func SaveNode(cfg *ClusterConfig, node *Node) error { update := false for i, n := range cfg.Nodes { if n.Name == node.Name { diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 86df75e021..b7c3cab1fd 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -39,14 +39,13 @@ const ( // Add adds a new node config to an existing cluster. func Add(cc *config.ClusterConfig, n config.Node) error { - cc.Nodes = append(cc.Nodes, n) - err := config.SaveProfile(cc.Name, cc) + + err := config.SaveNode(cc, &n) if err != nil { return err } err = Start(*cc, n, nil) - return err } From f5bdba6088c272901bba9e6113492d7a006f9eb6 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 24 Feb 2020 16:47:07 -0800 Subject: [PATCH 025/668] fix kvm2 configurator --- pkg/minikube/registry/drvs/kvm2/kvm2.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/minikube/registry/drvs/kvm2/kvm2.go b/pkg/minikube/registry/drvs/kvm2/kvm2.go index a3dbf67193..5dd00c12c3 100644 --- a/pkg/minikube/registry/drvs/kvm2/kvm2.go +++ b/pkg/minikube/registry/drvs/kvm2/kvm2.go @@ -67,8 +67,8 @@ type kvmDriver struct { ConnectionURI string } -func configure(mc config.ClusterConfig) (interface{}, error) { - name := mc.Name +func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { + name := n.Name return kvmDriver{ BaseDriver: &drivers.BaseDriver{ 
MachineName: name, From feaa9fc3b3549cc7ec358998a614da45e382a632 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 24 Feb 2020 16:50:30 -0800 Subject: [PATCH 026/668] hyperv and none drivers needed fixing too --- pkg/minikube/registry/drvs/hyperv/hyperv.go | 22 ++++++++++----------- pkg/minikube/registry/drvs/none/none.go | 4 ++-- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/pkg/minikube/registry/drvs/hyperv/hyperv.go b/pkg/minikube/registry/drvs/hyperv/hyperv.go index 89f63c93f3..841d38d540 100644 --- a/pkg/minikube/registry/drvs/hyperv/hyperv.go +++ b/pkg/minikube/registry/drvs/hyperv/hyperv.go @@ -29,7 +29,7 @@ import ( "github.com/docker/machine/libmachine/drivers" "github.com/pkg/errors" - cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/registry" @@ -52,16 +52,16 @@ func init() { } } -func configure(config cfg.ClusterConfig) (interface{}, error) { - d := hyperv.NewDriver(config.Name, localpath.MiniPath()) - d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) - d.VSwitch = config.HypervVirtualSwitch - if d.VSwitch == "" && config.HypervUseExternalSwitch { - switchName, adapter, err := chooseSwitch(config.HypervExternalAdapter) +func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { + d := hyperv.NewDriver(n.Name, localpath.MiniPath()) + d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO) + d.VSwitch = cfg.HypervVirtualSwitch + if d.VSwitch == "" && cfg.HypervUseExternalSwitch { + switchName, adapter, err := chooseSwitch(cfg.HypervExternalAdapter) if err != nil { return nil, errors.Wrapf(err, "failed to choose switch for Hyper-V driver") } - if config.HypervExternalAdapter == "" && switchName == "" { + if cfg.HypervExternalAdapter == "" && switchName == "" { // create a switch on the returned adapter switchName = defaultExternalSwitchName 
err := createVMSwitch(switchName, adapter) @@ -71,9 +71,9 @@ func configure(config cfg.ClusterConfig) (interface{}, error) { } d.VSwitch = switchName } - d.MemSize = config.Memory - d.CPU = config.CPUs - d.DiskSize = config.DiskSize + d.MemSize = cfg.Memory + d.CPU = cfg.CPUs + d.DiskSize = cfg.DiskSize d.SSHUser = "docker" d.DisableDynamicMemory = true // default to disable dynamic memory as minikube is unlikely to work properly with dynamic memory return d, nil diff --git a/pkg/minikube/registry/drvs/none/none.go b/pkg/minikube/registry/drvs/none/none.go index 4e1ae1a794..4bf39d6d7b 100644 --- a/pkg/minikube/registry/drvs/none/none.go +++ b/pkg/minikube/registry/drvs/none/none.go @@ -42,9 +42,9 @@ func init() { } } -func configure(mc config.ClusterConfig) (interface{}, error) { +func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { return none.NewDriver(none.Config{ - MachineName: mc.Name, + MachineName: n.Name, StorePath: localpath.MiniPath(), ContainerRuntime: mc.KubernetesConfig.ContainerRuntime, }), nil From 3d7215295384af1b6a72fa8aa04d2b757eadc2f6 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 24 Feb 2020 17:28:10 -0800 Subject: [PATCH 027/668] fixing lint and other random incorrect stuff --- cmd/minikube/cmd/delete.go | 3 ++- cmd/minikube/cmd/start.go | 10 ++++++++-- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 2 +- pkg/minikube/cluster/setup.go | 6 ++++-- pkg/minikube/node/node.go | 2 -- pkg/minikube/node/start.go | 8 +++++++- 6 files changed, 22 insertions(+), 9 deletions(-) diff --git a/cmd/minikube/cmd/delete.go b/cmd/minikube/cmd/delete.go index 9f43331c70..cc74e5b89e 100644 --- a/cmd/minikube/cmd/delete.go +++ b/cmd/minikube/cmd/delete.go @@ -229,8 +229,9 @@ func deleteProfile(profile *pkg_config.Profile) error { delErr := profileDeletionErr(profile.Name, fmt.Sprintf("%v", err)) deletionError.Err = delErr e = deletionError + } else { + e = err } - e = err } } if e != nil { diff --git a/cmd/minikube/cmd/start.go 
b/cmd/minikube/cmd/start.go index 0aa5a8691a..194c1c988a 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -355,7 +355,10 @@ func runStart(cmd *cobra.Command, args []string) { numNodes := viper.GetInt(nodes) if numNodes > 1 { - for i := 0; i < numNodes-1; i++ { + if driver.IsKIC(driverName) { + out.T(out.Meh, "The none driver is not compatible with multi-node clusters.") + } + for i := 1; i < numNodes; i++ { nodeName := fmt.Sprintf("%s%d", n.Name, i+1) n := config.Node{ Name: nodeName, @@ -363,7 +366,10 @@ func runStart(cmd *cobra.Command, args []string) { ControlPlane: false, KubernetesVersion: mc.KubernetesConfig.KubernetesVersion, } - node.Add(&mc, n) + err := node.Add(&mc, n) + if err != nil { + exit.WithError("adding node", err) + } } } } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 663e538f8c..6d1110c5de 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -371,7 +371,7 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC fmt.Println(joinCmd) out, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", joinCmd)) if err != nil { - return errors.Wrapf(err, "cmd failed: %s\n%s\n", joinCmd, out) + return errors.Wrapf(err, "cmd failed: %s\n%+v\n", joinCmd, out) } if _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", "sudo systemctl daemon-reload && sudo systemctl enable kubelet && sudo systemctl start kubelet")); err != nil { diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index dc1f46a0cd..ab5bd09d45 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -52,7 +52,6 @@ const ( embedCerts = "embed-certs" keepContext = "keep-context" imageRepository = "image-repository" - containerRuntime = "container-runtime" ) // InitialSetup performs all necessary operations on the initial control plane node when first spinning up a cluster @@ -165,7 
+164,10 @@ func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner command. } node.IP = ip - config.SaveNode(cfg, node) + err = config.SaveNode(cfg, node) + if err != nil { + exit.WithError("saving node", err) + } return runner, preExists, m, host } diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index b7c3cab1fd..1f2147cbc5 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -30,8 +30,6 @@ const ( waitUntilHealthy = "wait" cacheImageConfigKey = "cache" containerRuntime = "container-runtime" - embedCerts = "embed-certs" - keepContext = "keep-context" mountString = "mount-string" createMount = "mount" waitTimeout = "wait-timeout" diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 22dd8ac868..a57a48a150 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -88,7 +88,10 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo } } - bs.SetupCerts(cc.KubernetesConfig, n) + err = bs.SetupCerts(cc.KubernetesConfig, n) + if err != nil { + exit.WithError("setting up certs", err) + } cp, err := config.PrimaryControlPlane(cc) if err != nil { @@ -99,6 +102,9 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo exit.WithError("Getting bootstrapper", err) } joinCmd, err := cpBs.GenerateToken(cc.KubernetesConfig) + if err != nil { + exit.WithError("generating join token", err) + } return bs.JoinCluster(cc, n, joinCmd) } From 39f03bc925a0d7057c4bc2d1a8d93cc92a3fb32d Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 24 Feb 2020 17:39:41 -0800 Subject: [PATCH 028/668] prepareNone was in the wrong spot --- pkg/minikube/cluster/setup.go | 36 ++++++++++++++++++++++++++++++++ pkg/minikube/node/start.go | 39 ----------------------------------- 2 files changed, 36 insertions(+), 39 deletions(-) diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index ab5bd09d45..d8b5e3ed48 100644 --- 
a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -43,6 +43,7 @@ import ( "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/proxy" + "k8s.io/minikube/pkg/util" "k8s.io/minikube/pkg/util/retry" ) @@ -83,6 +84,12 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str addons.Start(viper.GetString(config.MachineProfile), existingAddons, config.AddonList) } + // special ops for none , like change minikube directory. + // multinode super doesn't work on the none driver + if cc.Driver == driver.None && len(cc.Nodes) == 1 { + prepareNone() + } + // Skip pre-existing, because we already waited for health if viper.GetBool(waitUntilHealthy) && !preExists { if err := bs.WaitForCluster(cc, viper.GetDuration(waitTimeout)); err != nil { @@ -287,3 +294,32 @@ func tryRegistry(r command.Runner) { out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo}) } } + +// prepareNone prepares the user and host for the joy of the "none" driver +func prepareNone() { + out.T(out.StartingNone, "Configuring local host environment ...") + if viper.GetBool(config.WantNoneDriverWarning) { + out.T(out.Empty, "") + out.WarningT("The 'none' driver provides limited isolation and may reduce system security and reliability.") + out.WarningT("For more information, see:") + out.T(out.URL, "https://minikube.sigs.k8s.io/docs/reference/drivers/none/") + out.T(out.Empty, "") + } + + if os.Getenv("CHANGE_MINIKUBE_NONE_USER") == "" { + home := os.Getenv("HOME") + out.WarningT("kubectl and minikube configuration will be stored in {{.home_folder}}", out.V{"home_folder": home}) + out.WarningT("To use kubectl or minikube commands as your own user, you may need to relocate them. 
For example, to overwrite your own settings, run:") + + out.T(out.Empty, "") + out.T(out.Command, "sudo mv {{.home_folder}}/.kube {{.home_folder}}/.minikube $HOME", out.V{"home_folder": home}) + out.T(out.Command, "sudo chown -R $USER $HOME/.kube $HOME/.minikube") + out.T(out.Empty, "") + + out.T(out.Tip, "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true") + } + + if err := util.MaybeChownDirRecursiveToMinikubeUser(localpath.MiniPath()); err != nil { + exit.WithCodeT(exit.Permissions, "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}", out.V{"minikube_dir_path": localpath.MiniPath(), "error": err}) + } +} diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index a57a48a150..b7ebb5f816 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -17,17 +17,13 @@ limitations under the License. package node import ( - "os" - "github.com/spf13/viper" "golang.org/x/sync/errgroup" cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/addons" "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/util" ) @@ -75,12 +71,6 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo out.T(out.FailureType, "Unable to load cached images from config file.") } - // special ops for none , like change minikube directory. 
- // multinode super doesn't work on the none driver - if driverName == driver.None && len(cc.Nodes) == 1 { - prepareNone() - } - // Skip pre-existing, because we already waited for health if viper.GetBool(waitUntilHealthy) && !preExists { if err := bs.WaitForCluster(cc, viper.GetDuration(waitTimeout)); err != nil { @@ -107,32 +97,3 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo } return bs.JoinCluster(cc, n, joinCmd) } - -// prepareNone prepares the user and host for the joy of the "none" driver -func prepareNone() { - out.T(out.StartingNone, "Configuring local host environment ...") - if viper.GetBool(config.WantNoneDriverWarning) { - out.T(out.Empty, "") - out.WarningT("The 'none' driver provides limited isolation and may reduce system security and reliability.") - out.WarningT("For more information, see:") - out.T(out.URL, "https://minikube.sigs.k8s.io/docs/reference/drivers/none/") - out.T(out.Empty, "") - } - - if os.Getenv("CHANGE_MINIKUBE_NONE_USER") == "" { - home := os.Getenv("HOME") - out.WarningT("kubectl and minikube configuration will be stored in {{.home_folder}}", out.V{"home_folder": home}) - out.WarningT("To use kubectl or minikube commands as your own user, you may need to relocate them. 
For example, to overwrite your own settings, run:") - - out.T(out.Empty, "") - out.T(out.Command, "sudo mv {{.home_folder}}/.kube {{.home_folder}}/.minikube $HOME", out.V{"home_folder": home}) - out.T(out.Command, "sudo chown -R $USER $HOME/.kube $HOME/.minikube") - out.T(out.Empty, "") - - out.T(out.Tip, "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true") - } - - if err := util.MaybeChownDirRecursiveToMinikubeUser(localpath.MiniPath()); err != nil { - exit.WithCodeT(exit.Permissions, "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}", out.V{"minikube_dir_path": localpath.MiniPath(), "error": err}) - } -} From 0fadf91d2cbd79da48549e35fcc256f5045a49f3 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 25 Feb 2020 16:36:53 -0800 Subject: [PATCH 029/668] i think it works? --- cmd/minikube/cmd/delete.go | 39 ++++----- cmd/minikube/cmd/logs.go | 2 +- cmd/minikube/cmd/status.go | 87 ++++++++++++------- pkg/drivers/hyperkit/driver.go | 3 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 3 +- pkg/minikube/cluster/cluster.go | 4 +- pkg/minikube/cluster/setup.go | 2 +- pkg/minikube/node/start.go | 4 +- pkg/minikube/registry/drvs/docker/docker.go | 2 +- .../registry/drvs/hyperkit/hyperkit.go | 3 +- pkg/minikube/registry/drvs/hyperv/hyperv.go | 2 +- pkg/minikube/registry/drvs/kvm2/kvm2.go | 2 +- .../registry/drvs/parallels/parallels.go | 2 +- pkg/minikube/registry/drvs/podman/podman.go | 2 +- .../registry/drvs/virtualbox/virtualbox.go | 2 +- pkg/minikube/registry/drvs/vmware/vmware.go | 2 +- .../drvs/vmwarefusion/vmwarefusion.go | 2 +- pkg/provision/provision.go | 3 +- 18 files changed, 94 insertions(+), 72 deletions(-) diff --git a/cmd/minikube/cmd/delete.go b/cmd/minikube/cmd/delete.go index cc74e5b89e..8ed30668b4 100644 --- a/cmd/minikube/cmd/delete.go +++ b/cmd/minikube/cmd/delete.go @@ -221,21 +221,14 @@ func deleteProfile(profile *pkg_config.Profile) error { } if err == nil && driver.BareMetal(cc.Driver) { 
- var e error - for _, n := range cc.Nodes { - if err := uninstallKubernetes(api, profile.Name, cc.KubernetesConfig, viper.GetString(cmdcfg.Bootstrapper), n.Name); err != nil { - deletionError, ok := err.(DeletionError) - if ok { - delErr := profileDeletionErr(profile.Name, fmt.Sprintf("%v", err)) - deletionError.Err = delErr - e = deletionError - } else { - e = err - } + if err := uninstallKubernetes(api, profile.Name, cc.KubernetesConfig, viper.GetString(cmdcfg.Bootstrapper), cc.Nodes[0].Name); err != nil { + deletionError, ok := err.(DeletionError) + if ok { + delErr := profileDeletionErr(profile.Name, fmt.Sprintf("%v", err)) + deletionError.Err = delErr + return deletionError } - } - if e != nil { - return e + return err } } @@ -243,13 +236,15 @@ func deleteProfile(profile *pkg_config.Profile) error { out.T(out.FailureType, "Failed to kill mount process: {{.error}}", out.V{"error": err}) } - if err = machine.DeleteHost(api, profile.Name); err != nil { - switch errors.Cause(err).(type) { - case mcnerror.ErrHostDoesNotExist: - glog.Infof("%s cluster does not exist. Proceeding ahead with cleanup.", profile.Name) - default: - out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err}) - out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": profile.Name}) + for _, n := range cc.Nodes { + if err = machine.DeleteHost(api, n.Name); err != nil { + switch errors.Cause(err).(type) { + case mcnerror.ErrHostDoesNotExist: + glog.Infof("%s cluster does not exist. 
Proceeding ahead with cleanup.", profile.Name) + default: + out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err}) + out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": profile.Name}) + } } } @@ -311,7 +306,7 @@ func profileDeletionErr(profileName string, additionalInfo string) error { func uninstallKubernetes(api libmachine.API, profile string, kc pkg_config.KubernetesConfig, bsName string, nodeName string) error { out.T(out.Resetting, "Uninstalling Kubernetes {{.kubernetes_version}} using {{.bootstrapper_name}} ...", out.V{"kubernetes_version": kc.KubernetesVersion, "bootstrapper_name": bsName}) - clusterBootstrapper, err := cluster.Bootstrapper(api, bsName, nodeName) + clusterBootstrapper, err := cluster.Bootstrapper(api, bsName, profile, nodeName) if err != nil { return DeletionError{Err: fmt.Errorf("unable to get bootstrapper: %v", err), Errtype: Fatal} } diff --git a/cmd/minikube/cmd/logs.go b/cmd/minikube/cmd/logs.go index 934ca9c1b0..7b6ff5f757 100644 --- a/cmd/minikube/cmd/logs.go +++ b/cmd/minikube/cmd/logs.go @@ -67,7 +67,7 @@ var logsCmd = &cobra.Command{ if err != nil { exit.WithError("command runner", err) } - bs, err := cluster.Bootstrapper(api, viper.GetString(cmdcfg.Bootstrapper), viper.GetString(config.MachineProfile)) + bs, err := cluster.Bootstrapper(api, viper.GetString(cmdcfg.Bootstrapper), viper.GetString(config.MachineProfile), viper.GetString(config.MachineProfile)) if err != nil { exit.WithError("Error getting cluster bootstrapper", err) } diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index 0fb1c79284..dfd62da6eb 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -55,10 +55,13 @@ const ( // Nonexistent means nonexistent Nonexistent = "Nonexistent" // ~state.None + // Irrelevant is used for statuses that aren't meaningful for worker nodes + Irrelevant = "Irrelevant" ) // Status holds string representations of 
component states type Status struct { + Name string Host string Kubelet string APIServer string @@ -69,7 +72,8 @@ const ( minikubeNotRunningStatusFlag = 1 << 0 clusterNotRunningStatusFlag = 1 << 1 k8sNotRunningStatusFlag = 1 << 2 - defaultStatusFormat = `host: {{.Host}} + defaultStatusFormat = `{{.Name}} +host: {{.Host}} kubelet: {{.Kubelet}} apiserver: {{.APIServer}} kubeconfig: {{.Kubeconfig}} @@ -95,26 +99,35 @@ var statusCmd = &cobra.Command{ } defer api.Close() - machineName := viper.GetString(config.MachineProfile) - st, err := status(api, machineName) + cluster := viper.GetString(config.MachineProfile) + cc, err := config.Load(cluster) if err != nil { - glog.Errorf("status error: %v", err) - } - if st.Host == Nonexistent { - glog.Errorf("The %q cluster does not exist!", machineName) + exit.WithError("getting config", err) } - switch strings.ToLower(output) { - case "text": - if err := statusText(st, os.Stdout); err != nil { - exit.WithError("status text failure", err) + var st *Status + for _, n := range cc.Nodes { + machineName := fmt.Sprintf("%s-%s", cluster, n.Name) + st, err = status(api, machineName, n.ControlPlane) + if err != nil { + glog.Errorf("status error: %v", err) } - case "json": - if err := statusJSON(st, os.Stdout); err != nil { - exit.WithError("status json failure", err) + if st.Host == Nonexistent { + glog.Errorf("The %q host does not exist!", machineName) + } + + switch strings.ToLower(output) { + case "text": + if err := statusText(st, os.Stdout); err != nil { + exit.WithError("status text failure", err) + } + case "json": + if err := statusJSON(st, os.Stdout); err != nil { + exit.WithError("status json failure", err) + } + default: + exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output)) } - default: - exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. 
Valid values: 'text', 'json'", output)) } os.Exit(exitCode(st)) @@ -126,17 +139,22 @@ func exitCode(st *Status) int { if st.Host != state.Running.String() { c |= minikubeNotRunningStatusFlag } - if st.APIServer != state.Running.String() || st.Kubelet != state.Running.String() { + if (st.APIServer != state.Running.String() && st.APIServer != Irrelevant) || st.Kubelet != state.Running.String() { c |= clusterNotRunningStatusFlag } - if st.Kubeconfig != Configured { + if st.Kubeconfig != Configured && st.Kubeconfig != Irrelevant { c |= k8sNotRunningStatusFlag } return c } -func status(api libmachine.API, name string) (*Status, error) { +func status(api libmachine.API, name string, controlPlane bool) (*Status, error) { + + profile := strings.Split(name, "-")[0] + node := strings.Split(name, "-")[1] + st := &Status{ + Name: node, Host: Nonexistent, APIServer: Nonexistent, Kubelet: Nonexistent, @@ -179,10 +197,17 @@ func status(api libmachine.API, name string) (*Status, error) { } st.Kubeconfig = Misconfigured - ok, err := kubeconfig.IsClusterInConfig(ip, name) - glog.Infof("%s is in kubeconfig at ip %s: %v (err=%v)", name, ip, ok, err) - if ok { - st.Kubeconfig = Configured + if !controlPlane { + st.Kubeconfig = Irrelevant + st.APIServer = Irrelevant + } + + if st.Kubeconfig != Irrelevant { + ok, err := kubeconfig.IsClusterInConfig(ip, profile) + glog.Infof("%s is in kubeconfig at ip %s: %v (err=%v)", name, ip, ok, err) + if ok { + st.Kubeconfig = Configured + } } host, err := machine.CheckIfHostExistsAndLoad(api, name) @@ -205,14 +230,16 @@ func status(api libmachine.API, name string) (*Status, error) { st.Kubelet = stk.String() } - sta, err := kverify.APIServerStatus(cr, ip, port) - glog.Infof("%s apiserver status = %s (err=%v)", name, stk, err) + if st.APIServer != Irrelevant { + sta, err := kverify.APIServerStatus(cr, ip, port) + glog.Infof("%s apiserver status = %s (err=%v)", name, stk, err) - if err != nil { - glog.Errorln("Error apiserver status:", err) - 
st.APIServer = state.Error.String() - } else { - st.APIServer = sta.String() + if err != nil { + glog.Errorln("Error apiserver status:", err) + st.APIServer = state.Error.String() + } else { + st.APIServer = sta.String() + } } return st, nil diff --git a/pkg/drivers/hyperkit/driver.go b/pkg/drivers/hyperkit/driver.go index 9659237408..d3d1031629 100644 --- a/pkg/drivers/hyperkit/driver.go +++ b/pkg/drivers/hyperkit/driver.go @@ -65,7 +65,6 @@ type Driver struct { UUID string VpnKitSock string VSockPorts []string - ClusterName string } // NewDriver creates a new driver for a host @@ -200,7 +199,7 @@ func (d *Driver) Restart() error { } func (d *Driver) createHost() (*hyperkit.HyperKit, error) { - stateDir := filepath.Join(d.StorePath, "machines", d.ClusterName, d.MachineName) + stateDir := filepath.Join(d.StorePath, "machines", d.MachineName) h, err := hyperkit.New("", d.VpnKitSock, stateDir) if err != nil { return nil, errors.Wrap(err, "new-ing Hyperkit") diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 6d1110c5de..5ae6c09033 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -36,6 +36,7 @@ import ( "github.com/docker/machine/libmachine/state" "github.com/golang/glog" "github.com/pkg/errors" + "github.com/spf13/viper" "k8s.io/client-go/kubernetes" kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/minikube/pkg/drivers/kic" @@ -73,7 +74,7 @@ func NewBootstrapper(api libmachine.API, name string) (*Bootstrapper, error) { if err != nil { return nil, errors.Wrap(err, "command runner") } - return &Bootstrapper{c: runner, contextName: name, k8sClient: nil}, nil + return &Bootstrapper{c: runner, contextName: viper.GetString(config.MachineProfile), k8sClient: nil}, nil } // GetKubeletStatus returns the kubelet status diff --git a/pkg/minikube/cluster/cluster.go b/pkg/minikube/cluster/cluster.go index a2b9e06613..a38a4cf8b6 100644 --- 
a/pkg/minikube/cluster/cluster.go +++ b/pkg/minikube/cluster/cluster.go @@ -42,12 +42,12 @@ func init() { } // Bootstrapper returns a new bootstrapper for the cluster -func Bootstrapper(api libmachine.API, bootstrapperName string, machineName string) (bootstrapper.Bootstrapper, error) { +func Bootstrapper(api libmachine.API, bootstrapperName string, cluster string, nodeName string) (bootstrapper.Bootstrapper, error) { var b bootstrapper.Bootstrapper var err error switch bootstrapperName { case bootstrapper.Kubeadm: - b, err = kubeadm.NewBootstrapper(api, machineName) + b, err = kubeadm.NewBootstrapper(api, fmt.Sprintf("%s-%s", cluster, nodeName)) if err != nil { return nil, errors.Wrap(err, "getting a new kubeadm bootstrapper") } diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index d8b5e3ed48..b784cb01e9 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -103,7 +103,7 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str // setupKubeAdm adds any requested files into the VM before Kubernetes is started func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) bootstrapper.Bootstrapper { - bs, err := Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), n.Name) + bs, err := Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg.Name, n.Name) if err != nil { exit.WithError("Failed to get bootstrapper", err) } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index b7ebb5f816..6bf1aef74a 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -42,7 +42,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo runner, preExists, mAPI, _ := cluster.StartMachine(&cc, &n) defer mAPI.Close() - bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), n.Name) + bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cc.Name, n.Name) if err != nil { 
exit.WithError("Failed to get bootstrapper", err) } @@ -87,7 +87,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo if err != nil { exit.WithError("Getting primary control plane", err) } - cpBs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cp.Name) + cpBs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cc.Name, cp.Name) if err != nil { exit.WithError("Getting bootstrapper", err) } diff --git a/pkg/minikube/registry/drvs/docker/docker.go b/pkg/minikube/registry/drvs/docker/docker.go index d5278a8f92..4f3acbad62 100644 --- a/pkg/minikube/registry/drvs/docker/docker.go +++ b/pkg/minikube/registry/drvs/docker/docker.go @@ -45,7 +45,7 @@ func init() { func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { return kic.NewDriver(kic.Config{ - MachineName: n.Name, + MachineName: fmt.Sprintf("%s-%s", mc.Name, n.Name), StorePath: localpath.MiniPath(), ImageDigest: kic.BaseImage, CPU: mc.CPUs, diff --git a/pkg/minikube/registry/drvs/hyperkit/hyperkit.go b/pkg/minikube/registry/drvs/hyperkit/hyperkit.go index b9e4b4f09f..0f0609176e 100644 --- a/pkg/minikube/registry/drvs/hyperkit/hyperkit.go +++ b/pkg/minikube/registry/drvs/hyperkit/hyperkit.go @@ -65,11 +65,10 @@ func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { return &hyperkit.Driver{ BaseDriver: &drivers.BaseDriver{ - MachineName: cfg.Name, + MachineName: fmt.Sprintf("%s-%s", cfg.Name, n.Name), StorePath: localpath.MiniPath(), SSHUser: "docker", }, - ClusterName: cfg.Name, Boot2DockerURL: cfg.Downloader.GetISOFileURI(cfg.MinikubeISO), DiskSize: cfg.DiskSize, Memory: cfg.Memory, diff --git a/pkg/minikube/registry/drvs/hyperv/hyperv.go b/pkg/minikube/registry/drvs/hyperv/hyperv.go index 841d38d540..fccb1b9076 100644 --- a/pkg/minikube/registry/drvs/hyperv/hyperv.go +++ b/pkg/minikube/registry/drvs/hyperv/hyperv.go @@ -53,7 +53,7 @@ func init() { } func configure(cfg config.ClusterConfig, n 
config.Node) (interface{}, error) { - d := hyperv.NewDriver(n.Name, localpath.MiniPath()) + d := hyperv.NewDriver(fmt.Sprintf("%s-%s", mc.Name, n.Name), localpath.MiniPath()) d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO) d.VSwitch = cfg.HypervVirtualSwitch if d.VSwitch == "" && cfg.HypervUseExternalSwitch { diff --git a/pkg/minikube/registry/drvs/kvm2/kvm2.go b/pkg/minikube/registry/drvs/kvm2/kvm2.go index 5dd00c12c3..eb1e2d1773 100644 --- a/pkg/minikube/registry/drvs/kvm2/kvm2.go +++ b/pkg/minikube/registry/drvs/kvm2/kvm2.go @@ -68,7 +68,7 @@ type kvmDriver struct { } func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { - name := n.Name + name := fmt.Sprintf("%s-%s", mc.Name, n.Name) return kvmDriver{ BaseDriver: &drivers.BaseDriver{ MachineName: name, diff --git a/pkg/minikube/registry/drvs/parallels/parallels.go b/pkg/minikube/registry/drvs/parallels/parallels.go index 29095a6226..a2ae347949 100644 --- a/pkg/minikube/registry/drvs/parallels/parallels.go +++ b/pkg/minikube/registry/drvs/parallels/parallels.go @@ -45,7 +45,7 @@ func init() { } func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { - d := parallels.NewDriver(n.Name, localpath.MiniPath()).(*parallels.Driver) + d := parallels.NewDriver(fmt.Sprintf("%s-%s", cfg.Name, n.Name), localpath.MiniPath()).(*parallels.Driver) d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO) d.Memory = cfg.Memory d.CPU = cfg.CPUs diff --git a/pkg/minikube/registry/drvs/podman/podman.go b/pkg/minikube/registry/drvs/podman/podman.go index eab3200b83..e05c6671f2 100644 --- a/pkg/minikube/registry/drvs/podman/podman.go +++ b/pkg/minikube/registry/drvs/podman/podman.go @@ -51,7 +51,7 @@ func init() { func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { return kic.NewDriver(kic.Config{ - MachineName: n.Name, + MachineName: fmt.Sprintf("%s-%s", mc.Name, n.Name), StorePath: localpath.MiniPath(), ImageDigest: 
strings.Split(kic.BaseImage, "@")[0], // for podman does not support docker images references with both a tag and digest. CPU: mc.CPUs, diff --git a/pkg/minikube/registry/drvs/virtualbox/virtualbox.go b/pkg/minikube/registry/drvs/virtualbox/virtualbox.go index 7dd13af948..fcbbcc9440 100644 --- a/pkg/minikube/registry/drvs/virtualbox/virtualbox.go +++ b/pkg/minikube/registry/drvs/virtualbox/virtualbox.go @@ -50,7 +50,7 @@ func init() { } func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { - d := virtualbox.NewDriver(n.Name, localpath.MiniPath()) + d := virtualbox.NewDriver(fmt.Sprintf("%s-%s", mc.Name, n.Name), localpath.MiniPath()) d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO) d.Memory = mc.Memory d.CPU = mc.CPUs diff --git a/pkg/minikube/registry/drvs/vmware/vmware.go b/pkg/minikube/registry/drvs/vmware/vmware.go index b6a90929c9..33b3e7c586 100644 --- a/pkg/minikube/registry/drvs/vmware/vmware.go +++ b/pkg/minikube/registry/drvs/vmware/vmware.go @@ -40,7 +40,7 @@ func init() { } func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { - d := vmwcfg.NewConfig(n.Name, localpath.MiniPath()) + d := vmwcfg.NewConfig(fmt.Sprintf("%s-%s", mc.Name, n.Name), localpath.MiniPath()) d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO) d.Memory = mc.Memory d.CPU = mc.CPUs diff --git a/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go b/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go index adc50d70e8..47047ffe3f 100644 --- a/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go +++ b/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go @@ -45,7 +45,7 @@ func init() { } func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { - d := vmwarefusion.NewDriver(n.Name, localpath.MiniPath()).(*vmwarefusion.Driver) + d := vmwarefusion.NewDriver(fmt.Sprintf("%s-%s", cfg.Name, n.Name), localpath.MiniPath()).(*vmwarefusion.Driver) d.Boot2DockerURL = 
cfg.Downloader.GetISOFileURI(cfg.MinikubeISO) d.Memory = cfg.Memory d.CPU = cfg.CPUs diff --git a/pkg/provision/provision.go b/pkg/provision/provision.go index 52fb131960..ff5f08fef8 100644 --- a/pkg/provision/provision.go +++ b/pkg/provision/provision.go @@ -195,7 +195,8 @@ func setRemoteAuthOptions(p provision.Provisioner) auth.Options { } func setContainerRuntimeOptions(name string, p miniProvisioner) error { - c, err := config.Load(name) + cluster := strings.Split(name, "-")[0] + c, err := config.Load(cluster) if err != nil { return errors.Wrap(err, "getting cluster config") } From e3826a5e4756714b69b305b4a0f4101fcf5b5b94 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 25 Feb 2020 17:18:25 -0800 Subject: [PATCH 030/668] fix ip command --- cmd/minikube/cmd/ip.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cmd/minikube/cmd/ip.go b/cmd/minikube/cmd/ip.go index 5d00182dde..e4ef171a1a 100644 --- a/cmd/minikube/cmd/ip.go +++ b/cmd/minikube/cmd/ip.go @@ -17,6 +17,8 @@ limitations under the License. 
package cmd import ( + "fmt" + "github.com/docker/machine/libmachine/mcnerror" "github.com/pkg/errors" "github.com/spf13/cobra" @@ -43,11 +45,12 @@ var ipCmd = &cobra.Command{ if err != nil { exit.WithError("Error getting config", err) } - host, err := api.Load(cc.Name) + machineName := fmt.Sprintf("%s-%s", cc.Name, cc.Nodes[0].Name) + host, err := api.Load(machineName) if err != nil { switch err := errors.Cause(err).(type) { case mcnerror.ErrHostDoesNotExist: - exit.WithCodeT(exit.NoInput, `"{{.profile_name}}" host does not exist, unable to show an IP`, out.V{"profile_name": cc.Name}) + exit.WithCodeT(exit.NoInput, `"{{.profile_name}}" host does not exist, unable to show an IP`, out.V{"profile_name": machineName}) default: exit.WithError("Error getting host", err) } From 676588f6b6f811e6941314d8cc40ef535743ff9b Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 25 Feb 2020 23:47:08 -0800 Subject: [PATCH 031/668] refactor machine name creation into a function --- cmd/minikube/cmd/ip.go | 5 ++--- cmd/minikube/cmd/ssh.go | 13 +++++++++++-- cmd/minikube/cmd/status.go | 3 ++- cmd/minikube/cmd/status_test.go | 12 ++++++------ pkg/minikube/cluster/cluster.go | 3 ++- pkg/minikube/driver/driver.go | 5 +++++ pkg/minikube/machine/cluster_test.go | 11 +++++++---- pkg/minikube/machine/machine.go | 5 +---- pkg/minikube/registry/drvs/docker/docker.go | 2 +- pkg/minikube/registry/drvs/hyperkit/hyperkit.go | 2 +- pkg/minikube/registry/drvs/hyperv/hyperv.go | 2 +- pkg/minikube/registry/drvs/kvm2/kvm2.go | 2 +- pkg/minikube/registry/drvs/parallels/parallels.go | 2 +- pkg/minikube/registry/drvs/podman/podman.go | 2 +- pkg/minikube/registry/drvs/virtualbox/virtualbox.go | 2 +- pkg/minikube/registry/drvs/vmware/vmware.go | 2 +- .../registry/drvs/vmwarefusion/vmwarefusion.go | 2 +- 17 files changed, 45 insertions(+), 30 deletions(-) diff --git a/cmd/minikube/cmd/ip.go b/cmd/minikube/cmd/ip.go index e4ef171a1a..a6b607eb12 100644 --- a/cmd/minikube/cmd/ip.go +++ 
b/cmd/minikube/cmd/ip.go @@ -17,13 +17,12 @@ limitations under the License. package cmd import ( - "fmt" - "github.com/docker/machine/libmachine/mcnerror" "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" @@ -45,7 +44,7 @@ var ipCmd = &cobra.Command{ if err != nil { exit.WithError("Error getting config", err) } - machineName := fmt.Sprintf("%s-%s", cc.Name, cc.Nodes[0].Name) + machineName := driver.MachineName(cc.Name, cc.Nodes[0].Name) host, err := api.Load(machineName) if err != nil { switch err := errors.Cause(err).(type) { diff --git a/cmd/minikube/cmd/ssh.go b/cmd/minikube/cmd/ssh.go index 84dead06e1..2ae2e5febd 100644 --- a/cmd/minikube/cmd/ssh.go +++ b/cmd/minikube/cmd/ssh.go @@ -45,7 +45,15 @@ var sshCmd = &cobra.Command{ if err != nil { exit.WithError("Error getting config", err) } - host, err := machine.CheckIfHostExistsAndLoad(api, cc.Name) + + if nodeName == "" { + cp, err := config.PrimaryControlPlane(*cc) + if err != nil { + exit.WithError("Getting primary control plane", err) + } + nodeName = cp.Name + } + host, err := machine.CheckIfHostExistsAndLoad(api, driver.MachineName(cc.Name, nodeName)) if err != nil { exit.WithError("Error getting host", err) } @@ -58,7 +66,7 @@ var sshCmd = &cobra.Command{ ssh.SetDefaultClient(ssh.External) } - err = machine.CreateSSHShell(api, args) + err = machine.CreateSSHShell(api, driver.MachineName(cc.Name, nodeName), args) if err != nil { // This is typically due to a non-zero exit code, so no need for flourish. out.ErrLn("ssh: %v", err) @@ -70,4 +78,5 @@ var sshCmd = &cobra.Command{ func init() { sshCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. 
Useful for the machine drivers when they will not start with 'Waiting for SSH'.") + sshCmd.Flags().StringVarP(&nodeName, "node", "n", "", "The node to ssh into. Defaults to the primary control plane.") } diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index dfd62da6eb..b7e3005c23 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -34,6 +34,7 @@ import ( "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/machine" @@ -107,7 +108,7 @@ var statusCmd = &cobra.Command{ var st *Status for _, n := range cc.Nodes { - machineName := fmt.Sprintf("%s-%s", cluster, n.Name) + machineName := driver.MachineName(cluster, n.Name) st, err = status(api, machineName, n.ControlPlane) if err != nil { glog.Errorf("status error: %v", err) diff --git a/cmd/minikube/cmd/status_test.go b/cmd/minikube/cmd/status_test.go index ef414631f8..44f4133dfd 100644 --- a/cmd/minikube/cmd/status_test.go +++ b/cmd/minikube/cmd/status_test.go @@ -51,18 +51,18 @@ func TestStatusText(t *testing.T) { }{ { name: "ok", - state: &Status{Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: Configured}, - want: "host: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n", + state: &Status{Name: "minikube", Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: Configured}, + want: "minikube\nhost: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n", }, { name: "paused", - state: &Status{Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured}, - want: "host: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n", + state: &Status{Name: "minikube", Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured}, + want: 
"minikube\nhost: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n", }, { name: "down", - state: &Status{Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured}, - want: "host: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n", + state: &Status{Name: "minikube", Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured}, + want: "minikube\nhost: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n", }, } for _, tc := range tests { diff --git a/pkg/minikube/cluster/cluster.go b/pkg/minikube/cluster/cluster.go index a38a4cf8b6..fd45d789aa 100644 --- a/pkg/minikube/cluster/cluster.go +++ b/pkg/minikube/cluster/cluster.go @@ -26,6 +26,7 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper" "k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" ) @@ -47,7 +48,7 @@ func Bootstrapper(api libmachine.API, bootstrapperName string, cluster string, n var err error switch bootstrapperName { case bootstrapper.Kubeadm: - b, err = kubeadm.NewBootstrapper(api, fmt.Sprintf("%s-%s", cluster, nodeName)) + b, err = kubeadm.NewBootstrapper(api, driver.MachineName(cluster, nodeName)) if err != nil { return nil, errors.Wrap(err, "getting a new kubeadm bootstrapper") } diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index eb122f3f2f..21ebc7c432 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -108,6 +108,11 @@ func BareMetal(name string) bool { return name == None || name == Mock } +// MachineName return the name of the machine given proper config +func MachineName(cluster string, node string) string { 
+ return fmt.Sprintf("%s-%s", cluster, node) +} + // NeedsRoot returns true if driver needs to run with root privileges func NeedsRoot(name string) bool { return name == None || name == Podman diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index 449acd729e..2b26215c6c 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -427,19 +427,22 @@ func TestCreateSSHShell(t *testing.T) { t.Fatalf("Error starting ssh server: %v", err) } + m := viper.GetString("profile") + d := &tests.MockDriver{ Port: port, CurrentState: state.Running, BaseDriver: drivers.BaseDriver{ - IPAddress: "127.0.0.1", - SSHKeyPath: "", + IPAddress: "127.0.0.1", + SSHKeyPath: "", + MachineName: m, }, T: t, } - api.Hosts[viper.GetString("profile")] = &host.Host{Driver: d} + api.Hosts[m] = &host.Host{Driver: d} cliArgs := []string{"exit"} - if err := CreateSSHShell(api, cliArgs); err != nil { + if err := CreateSSHShell(api, m, cliArgs); err != nil { t.Fatalf("Error running ssh command: %v", err) } diff --git a/pkg/minikube/machine/machine.go b/pkg/minikube/machine/machine.go index 7295b33dab..5b65fd59d1 100644 --- a/pkg/minikube/machine/machine.go +++ b/pkg/minikube/machine/machine.go @@ -25,8 +25,6 @@ import ( "github.com/docker/machine/libmachine/state" "github.com/golang/glog" "github.com/pkg/errors" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/localpath" ) @@ -124,8 +122,7 @@ func machineDirs(miniHome ...string) (dirs []string, err error) { } // CreateSSHShell creates a new SSH shell / client -func CreateSSHShell(api libmachine.API, args []string) error { - machineName := viper.GetString(config.MachineProfile) +func CreateSSHShell(api libmachine.API, machineName string, args []string) error { host, err := CheckIfHostExistsAndLoad(api, machineName) if err != nil { return errors.Wrap(err, "host exists and load") diff --git a/pkg/minikube/registry/drvs/docker/docker.go 
b/pkg/minikube/registry/drvs/docker/docker.go index 4f3acbad62..af8a8f27cd 100644 --- a/pkg/minikube/registry/drvs/docker/docker.go +++ b/pkg/minikube/registry/drvs/docker/docker.go @@ -45,7 +45,7 @@ func init() { func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { return kic.NewDriver(kic.Config{ - MachineName: fmt.Sprintf("%s-%s", mc.Name, n.Name), + MachineName: driver.MachineName(mc.Name, n.Name), StorePath: localpath.MiniPath(), ImageDigest: kic.BaseImage, CPU: mc.CPUs, diff --git a/pkg/minikube/registry/drvs/hyperkit/hyperkit.go b/pkg/minikube/registry/drvs/hyperkit/hyperkit.go index 0f0609176e..5fe099b79f 100644 --- a/pkg/minikube/registry/drvs/hyperkit/hyperkit.go +++ b/pkg/minikube/registry/drvs/hyperkit/hyperkit.go @@ -65,7 +65,7 @@ func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { return &hyperkit.Driver{ BaseDriver: &drivers.BaseDriver{ - MachineName: fmt.Sprintf("%s-%s", cfg.Name, n.Name), + MachineName: driver.MachineName(cfg.Name, n.Name), StorePath: localpath.MiniPath(), SSHUser: "docker", }, diff --git a/pkg/minikube/registry/drvs/hyperv/hyperv.go b/pkg/minikube/registry/drvs/hyperv/hyperv.go index fccb1b9076..55440fb711 100644 --- a/pkg/minikube/registry/drvs/hyperv/hyperv.go +++ b/pkg/minikube/registry/drvs/hyperv/hyperv.go @@ -53,7 +53,7 @@ func init() { } func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { - d := hyperv.NewDriver(fmt.Sprintf("%s-%s", mc.Name, n.Name), localpath.MiniPath()) + d := hyperv.NewDriver(driver.MachineName(mc.Name, n.Name), localpath.MiniPath()) d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO) d.VSwitch = cfg.HypervVirtualSwitch if d.VSwitch == "" && cfg.HypervUseExternalSwitch { diff --git a/pkg/minikube/registry/drvs/kvm2/kvm2.go b/pkg/minikube/registry/drvs/kvm2/kvm2.go index eb1e2d1773..90071e6bcf 100644 --- a/pkg/minikube/registry/drvs/kvm2/kvm2.go +++ b/pkg/minikube/registry/drvs/kvm2/kvm2.go @@ -68,7 +68,7 @@ type 
kvmDriver struct { } func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { - name := fmt.Sprintf("%s-%s", mc.Name, n.Name) + name := driver.MachineName(mc.Name, n.Name) return kvmDriver{ BaseDriver: &drivers.BaseDriver{ MachineName: name, diff --git a/pkg/minikube/registry/drvs/parallels/parallels.go b/pkg/minikube/registry/drvs/parallels/parallels.go index a2ae347949..8f892cb0b0 100644 --- a/pkg/minikube/registry/drvs/parallels/parallels.go +++ b/pkg/minikube/registry/drvs/parallels/parallels.go @@ -45,7 +45,7 @@ func init() { } func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { - d := parallels.NewDriver(fmt.Sprintf("%s-%s", cfg.Name, n.Name), localpath.MiniPath()).(*parallels.Driver) + d := parallels.NewDriver(driver.MachineName(cfg.Name, n.Name), localpath.MiniPath()).(*parallels.Driver) d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO) d.Memory = cfg.Memory d.CPU = cfg.CPUs diff --git a/pkg/minikube/registry/drvs/podman/podman.go b/pkg/minikube/registry/drvs/podman/podman.go index e05c6671f2..f2c9a0329f 100644 --- a/pkg/minikube/registry/drvs/podman/podman.go +++ b/pkg/minikube/registry/drvs/podman/podman.go @@ -51,7 +51,7 @@ func init() { func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { return kic.NewDriver(kic.Config{ - MachineName: fmt.Sprintf("%s-%s", mc.Name, n.Name), + MachineName: driver.MachineName(mc.Name, n.Name), StorePath: localpath.MiniPath(), ImageDigest: strings.Split(kic.BaseImage, "@")[0], // for podman does not support docker images references with both a tag and digest. 
CPU: mc.CPUs, diff --git a/pkg/minikube/registry/drvs/virtualbox/virtualbox.go b/pkg/minikube/registry/drvs/virtualbox/virtualbox.go index fcbbcc9440..04c084c249 100644 --- a/pkg/minikube/registry/drvs/virtualbox/virtualbox.go +++ b/pkg/minikube/registry/drvs/virtualbox/virtualbox.go @@ -50,7 +50,7 @@ func init() { } func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { - d := virtualbox.NewDriver(fmt.Sprintf("%s-%s", mc.Name, n.Name), localpath.MiniPath()) + d := virtualbox.NewDriver(driver.MachineName(mc.Name, n.Name), localpath.MiniPath()) d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO) d.Memory = mc.Memory d.CPU = mc.CPUs diff --git a/pkg/minikube/registry/drvs/vmware/vmware.go b/pkg/minikube/registry/drvs/vmware/vmware.go index 33b3e7c586..65dceda11b 100644 --- a/pkg/minikube/registry/drvs/vmware/vmware.go +++ b/pkg/minikube/registry/drvs/vmware/vmware.go @@ -40,7 +40,7 @@ func init() { } func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { - d := vmwcfg.NewConfig(fmt.Sprintf("%s-%s", mc.Name, n.Name), localpath.MiniPath()) + d := vmwcfg.NewConfig(driver.MachineName(mc.Name, n.Name), localpath.MiniPath()) d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO) d.Memory = mc.Memory d.CPU = mc.CPUs diff --git a/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go b/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go index 47047ffe3f..5f73cb6949 100644 --- a/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go +++ b/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go @@ -45,7 +45,7 @@ func init() { } func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { - d := vmwarefusion.NewDriver(fmt.Sprintf("%s-%s", cfg.Name, n.Name), localpath.MiniPath()).(*vmwarefusion.Driver) + d := vmwarefusion.NewDriver(driver.MachineName(cfg.Name, n.Name), localpath.MiniPath()).(*vmwarefusion.Driver) d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO) d.Memory = 
cfg.Memory d.CPU = cfg.CPUs From d7df027fb8a56b6daa93bf2d898e12ca9dc97043 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 26 Feb 2020 15:26:05 -0800 Subject: [PATCH 032/668] fix delete and stop --- cmd/minikube/cmd/delete.go | 7 ++- cmd/minikube/cmd/stop.go | 44 +++++++------------ pkg/drivers/hyperkit/driver.go | 1 - pkg/minikube/bootstrapper/bootstrapper.go | 2 +- .../bootstrapper/bsutil/kverify/kverify.go | 2 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 4 +- pkg/minikube/cluster/setup.go | 2 +- pkg/minikube/node/start.go | 2 +- 8 files changed, 25 insertions(+), 39 deletions(-) diff --git a/cmd/minikube/cmd/delete.go b/cmd/minikube/cmd/delete.go index 8ed30668b4..9169a4fa71 100644 --- a/cmd/minikube/cmd/delete.go +++ b/cmd/minikube/cmd/delete.go @@ -142,10 +142,9 @@ func runDelete(cmd *cobra.Command, args []string) { exit.UsageT("usage: minikube delete") } - profileName := viper.GetString(pkg_config.MachineProfile) - profile, err := pkg_config.LoadProfile(profileName) + profile, err := pkg_config.LoadProfile(profileFlag) if err != nil { - out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": profileName}) + out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": profileFlag}) } errs := DeleteProfiles([]*pkg_config.Profile{profile}) @@ -237,7 +236,7 @@ func deleteProfile(profile *pkg_config.Profile) error { } for _, n := range cc.Nodes { - if err = machine.DeleteHost(api, n.Name); err != nil { + if err = machine.DeleteHost(api, driver.MachineName(profile.Name, n.Name)); err != nil { switch errors.Cause(err).(type) { case mcnerror.ErrHostDoesNotExist: glog.Infof("%s cluster does not exist. Proceeding ahead with cleanup.", profile.Name) diff --git a/cmd/minikube/cmd/stop.go b/cmd/minikube/cmd/stop.go index 0c5ae89e71..a1bb3e782c 100644 --- a/cmd/minikube/cmd/stop.go +++ b/cmd/minikube/cmd/stop.go @@ -17,19 +17,15 @@ limitations under the License. 
package cmd import ( - "time" - - "github.com/docker/machine/libmachine/mcnerror" - "github.com/golang/glog" - "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" + "k8s.io/minikube/pkg/minikube/config" pkg_config "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" - "k8s.io/minikube/pkg/util/retry" ) // stopCmd represents the stop command @@ -50,31 +46,23 @@ func runStop(cmd *cobra.Command, args []string) { } defer api.Close() - nonexistent := false - stop := func() (err error) { - err = machine.StopHost(api, profile) - if err == nil { - return nil - } - glog.Warningf("stop host returned error: %v", err) - - switch err := errors.Cause(err).(type) { - case mcnerror.ErrHostDoesNotExist: - out.T(out.Meh, `"{{.profile_name}}" does not exist, nothing to stop`, out.V{"profile_name": profile}) - nonexistent = true - return nil - default: - return err + cc, err := config.Load(profile) + if err != nil { + exit.WithError("Error retrieving config", err) + } + + // TODO replace this back with expo backoff + for _, n := range cc.Nodes { + err := machine.StopHost(api, driver.MachineName(profile, n.Name)) + if err != nil { + exit.WithError("Unable to stop VM", err) } + /*if err := retry.Expo(fn, 5*time.Second, 3*time.Minute, 5); err != nil { + exit.WithError("Unable to stop VM", err) + }*/ } - if err := retry.Expo(stop, 5*time.Second, 3*time.Minute, 5); err != nil { - exit.WithError("Unable to stop VM", err) - } - - if !nonexistent { - out.T(out.Stopped, `"{{.profile_name}}" stopped.`, out.V{"profile_name": profile}) - } + out.T(out.Stopped, `"{{.profile_name}}" stopped.`, out.V{"profile_name": profile}) if err := killMountProcess(); err != nil { out.T(out.WarningType, "Unable to kill mount process: {{.error}}", out.V{"error": err}) diff --git a/pkg/drivers/hyperkit/driver.go 
b/pkg/drivers/hyperkit/driver.go index d3d1031629..bf775240e6 100644 --- a/pkg/drivers/hyperkit/driver.go +++ b/pkg/drivers/hyperkit/driver.go @@ -519,7 +519,6 @@ func (d *Driver) sendSignal(s os.Signal) error { func (d *Driver) getPid() int { pidPath := d.ResolveStorePath(machineFileName) - log.Debugf("PIDPATH=%s", pidPath) f, err := os.Open(pidPath) if err != nil { log.Warnf("Error reading pid file: %v", err) diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index 6bb03fa986..f250423833 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -39,7 +39,7 @@ type Bootstrapper interface { StartCluster(config.ClusterConfig) error UpdateCluster(config.ClusterConfig) error DeleteCluster(config.KubernetesConfig) error - WaitForCluster(config.ClusterConfig, time.Duration) error + WaitForNode(config.ClusterConfig, config.Node, time.Duration) error JoinCluster(config.ClusterConfig, config.Node, string) error UpdateNode(config.ClusterConfig, config.Node, cruntime.Manager) error GenerateToken(config.KubernetesConfig) (string, error) diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go index aa076cecb9..bc4f03e8b0 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go @@ -67,7 +67,7 @@ func apiServerPID(cr command.Runner) (int, error) { return strconv.Atoi(s) } -// SystemPods verifies essential pods for running kurnetes is running +// SystemPods verifies essential pods for running kubernetes is running func SystemPods(client *kubernetes.Clientset, start time.Time, timeout time.Duration) error { glog.Info("waiting for kube-system pods to appear ...") pStart := time.Now() diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 5ae6c09033..7ccef6ee40 100644 --- 
a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -249,8 +249,8 @@ func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error return c, err } -// WaitForCluster blocks until the cluster appears to be healthy -func (k *Bootstrapper) WaitForCluster(cfg config.ClusterConfig, timeout time.Duration) error { +// WaitForCluster blocks until the node appears to be healthy +func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, timeout time.Duration) error { start := time.Now() out.T(out.Waiting, "Waiting for cluster to come online ...") cp, err := config.PrimaryControlPlane(cfg) diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index b784cb01e9..f09a8188c3 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -92,7 +92,7 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str // Skip pre-existing, because we already waited for health if viper.GetBool(waitUntilHealthy) && !preExists { - if err := bs.WaitForCluster(cc, viper.GetDuration(waitTimeout)); err != nil { + if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil { exit.WithError("Wait failed", err) } } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 6bf1aef74a..efea8548fb 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -73,7 +73,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo // Skip pre-existing, because we already waited for health if viper.GetBool(waitUntilHealthy) && !preExists { - if err := bs.WaitForCluster(cc, viper.GetDuration(waitTimeout)); err != nil { + if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil { exit.WithError("Wait failed", err) } } From e7f8abc048a314a4cb254007c043aaae49095dd9 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 26 Feb 2020 15:40:28 -0800 Subject: [PATCH 033/668] 
fix waitfornode --- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 7ccef6ee40..1316612771 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -249,16 +249,19 @@ func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error return c, err } -// WaitForCluster blocks until the node appears to be healthy +// WaitForNode blocks until the node appears to be healthy func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, timeout time.Duration) error { start := time.Now() - out.T(out.Waiting, "Waiting for cluster to come online ...") + out.T(out.Waiting, "Waiting for node {{.name}} to come online ...", out.V{"name": n.Name}) cp, err := config.PrimaryControlPlane(cfg) if err != nil { return err } - if err := kverify.APIServerProcess(k.c, start, timeout); err != nil { - return err + + if n.ControlPlane { + if err := kverify.APIServerProcess(k.c, start, timeout); err != nil { + return err + } } ip := cp.IP @@ -270,8 +273,10 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time return errors.Wrapf(err, "get host-bind port %d for container %s", port, cfg.Name) } } - if err := kverify.APIServerIsRunning(start, ip, port, timeout); err != nil { - return err + if n.ControlPlane { + if err := kverify.APIServerIsRunning(start, ip, port, timeout); err != nil { + return err + } } c, err := k.client(ip, port) @@ -369,7 +374,6 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC // Join the master by specifying its token joinCmd = fmt.Sprintf("%s --v=10 --node-name=%s", joinCmd, n.Name) - fmt.Println(joinCmd) out, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", joinCmd)) if err != nil { return errors.Wrapf(err, "cmd failed: %s\n%+v\n", joinCmd, out) 
From 9f82d6855b31936c80b1addff468da917db2c7a1 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 26 Feb 2020 15:51:14 -0800 Subject: [PATCH 034/668] actually cache images on cluster startup --- cmd/minikube/cmd/cache.go | 4 ++-- pkg/minikube/cluster/setup.go | 4 ++++ pkg/minikube/config/config.go | 14 ++++++++++++++ pkg/minikube/node/cache.go | 13 ------------- pkg/minikube/node/start.go | 2 +- 5 files changed, 21 insertions(+), 16 deletions(-) diff --git a/cmd/minikube/cmd/cache.go b/cmd/minikube/cmd/cache.go index eb91371984..f91f508b00 100644 --- a/cmd/minikube/cmd/cache.go +++ b/cmd/minikube/cmd/cache.go @@ -19,10 +19,10 @@ package cmd import ( "github.com/spf13/cobra" cmdConfig "k8s.io/minikube/cmd/minikube/cmd/config" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/image" "k8s.io/minikube/pkg/minikube/machine" - "k8s.io/minikube/pkg/minikube/node" ) // cacheImageConfigKey is the config field name used to store which images we have previously cached @@ -75,7 +75,7 @@ var reloadCacheCmd = &cobra.Command{ Short: "reload cached images.", Long: "reloads images previously added using the 'cache add' subcommand", Run: func(cmd *cobra.Command, args []string) { - err := node.CacheAndLoadImagesInConfig() + err := config.CacheAndLoadImagesInConfig() if err != nil { exit.WithError("Failed to reload cached images", err) } diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index f09a8188c3..1d194ca900 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -79,6 +79,10 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str exit.WithError("Error starting cluster", err) } + if err := config.CacheAndLoadImagesInConfig(); err != nil { + out.T(out.FailureType, "Unable to load cached images from config file.") + } + // enable addons, both old and new! 
if existingAddons != nil { addons.Start(viper.GetString(config.MachineProfile), existingAddons, config.AddonList) diff --git a/pkg/minikube/config/config.go b/pkg/minikube/config/config.go index bf7e0bdd28..c0ab5d0976 100644 --- a/pkg/minikube/config/config.go +++ b/pkg/minikube/config/config.go @@ -26,6 +26,7 @@ import ( "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/localpath" + "k8s.io/minikube/pkg/minikube/machine" ) const ( @@ -201,3 +202,16 @@ func (c *simpleConfigLoader) WriteConfigToFile(profileName string, cc *ClusterCo } return ioutil.WriteFile(path, contents, 0644) } + +// CacheAndLoadImagesInConfig loads the images currently in the config file +// called by 'start' and 'cache reload' commands. +func CacheAndLoadImagesInConfig() error { + images, err := imagesInConfigFile() + if err != nil { + return err + } + if len(images) == 0 { + return nil + } + return machine.CacheAndLoadImages(images) +} diff --git a/pkg/minikube/node/cache.go b/pkg/minikube/node/cache.go index 03b854a99e..41192d523f 100644 --- a/pkg/minikube/node/cache.go +++ b/pkg/minikube/node/cache.go @@ -116,16 +116,3 @@ func imagesInConfigFile() ([]string, error) { } return []string{}, nil } - -// CacheAndLoadImagesInConfig loads the images currently in the config file -// called by 'start' and 'cache reload' commands. 
-func CacheAndLoadImagesInConfig() error { - images, err := imagesInConfigFile() - if err != nil { - return err - } - if len(images) == 0 { - return nil - } - return machine.CacheAndLoadImages(images) -} diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index efea8548fb..8ce658c836 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -67,7 +67,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo exit.WithError("Failed to update node", err) } - if err := CacheAndLoadImagesInConfig(); err != nil { + if err := config.CacheAndLoadImagesInConfig(); err != nil { out.T(out.FailureType, "Unable to load cached images from config file.") } From 2bf3b9a9f93254fb1d46172992e70a757fcaebbf Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Thu, 27 Feb 2020 11:40:51 -0800 Subject: [PATCH 035/668] move caching code to cluster --- cmd/minikube/cmd/cache.go | 4 +-- cmd/minikube/cmd/kubectl.go | 4 +-- pkg/minikube/{node => cluster}/cache.go | 33 +++++++++++++++++++------ pkg/minikube/cluster/setup.go | 2 +- pkg/minikube/config/config.go | 14 ----------- pkg/minikube/node/node.go | 13 +++++----- pkg/minikube/node/start.go | 8 +++--- 7 files changed, 41 insertions(+), 37 deletions(-) rename pkg/minikube/{node => cluster}/cache.go (79%) diff --git a/cmd/minikube/cmd/cache.go b/cmd/minikube/cmd/cache.go index f91f508b00..ab1f075853 100644 --- a/cmd/minikube/cmd/cache.go +++ b/cmd/minikube/cmd/cache.go @@ -19,7 +19,7 @@ package cmd import ( "github.com/spf13/cobra" cmdConfig "k8s.io/minikube/cmd/minikube/cmd/config" - "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/image" "k8s.io/minikube/pkg/minikube/machine" @@ -75,7 +75,7 @@ var reloadCacheCmd = &cobra.Command{ Short: "reload cached images.", Long: "reloads images previously added using the 'cache add' subcommand", Run: func(cmd *cobra.Command, args []string) { - err 
:= config.CacheAndLoadImagesInConfig() + err := cluster.CacheAndLoadImagesInConfig() if err != nil { exit.WithError("Failed to reload cached images", err) } diff --git a/cmd/minikube/cmd/kubectl.go b/cmd/minikube/cmd/kubectl.go index ebceeb9468..342b9b283d 100644 --- a/cmd/minikube/cmd/kubectl.go +++ b/cmd/minikube/cmd/kubectl.go @@ -25,10 +25,10 @@ import ( "github.com/golang/glog" "github.com/spf13/cobra" "github.com/spf13/viper" + "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/machine" - "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" ) @@ -59,7 +59,7 @@ minikube kubectl -- get pods --namespace kube-system`, version = cc.KubernetesConfig.KubernetesVersion } - path, err := node.CacheKubectlBinary(version) + path, err := cluster.CacheKubectlBinary(version) if err != nil { out.ErrLn("Error caching kubectl: %v", err) } diff --git a/pkg/minikube/node/cache.go b/pkg/minikube/cluster/cache.go similarity index 79% rename from pkg/minikube/node/cache.go rename to pkg/minikube/cluster/cache.go index 41192d523f..04e7c8b6c6 100644 --- a/pkg/minikube/node/cache.go +++ b/pkg/minikube/cluster/cache.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package node +package cluster import ( "os" @@ -33,8 +33,13 @@ import ( "k8s.io/minikube/pkg/minikube/out" ) -// beginCacheRequiredImages caches images required for kubernetes version in the background -func beginCacheRequiredImages(g *errgroup.Group, imageRepository string, k8sVersion string) { +const ( + cacheImages = "cache-images" + cacheImageConfigKey = "cache" +) + +// BeginCacheRequiredImages caches images required for kubernetes version in the background +func BeginCacheRequiredImages(g *errgroup.Group, imageRepository string, k8sVersion string) { if !viper.GetBool("cache-images") { return } @@ -44,7 +49,8 @@ func beginCacheRequiredImages(g *errgroup.Group, imageRepository string, k8sVers }) } -func handleDownloadOnly(cacheGroup *errgroup.Group, k8sVersion string) { +// HandleDownloadOnly handles the download-only parameter +func HandleDownloadOnly(cacheGroup *errgroup.Group, k8sVersion string) { // If --download-only, complete the remaining downloads and exit. if !viper.GetBool("download-only") { return @@ -55,7 +61,7 @@ func handleDownloadOnly(cacheGroup *errgroup.Group, k8sVersion string) { if _, err := CacheKubectlBinary(k8sVersion); err != nil { exit.WithError("Failed to cache kubectl", err) } - waitCacheRequiredImages(cacheGroup) + WaitCacheRequiredImages(cacheGroup) if err := saveImagesToTarFromConfig(); err != nil { exit.WithError("Failed to cache images to tar", err) } @@ -79,8 +85,8 @@ func doCacheBinaries(k8sVersion string) error { return machine.CacheBinariesForBootstrapper(k8sVersion, viper.GetString(cmdcfg.Bootstrapper)) } -// waitCacheRequiredImages blocks until the required images are all cached. -func waitCacheRequiredImages(g *errgroup.Group) { +// WaitCacheRequiredImages blocks until the required images are all cached. 
+func WaitCacheRequiredImages(g *errgroup.Group) { if !viper.GetBool(cacheImages) { return } @@ -102,6 +108,19 @@ func saveImagesToTarFromConfig() error { return image.SaveToDir(images, constants.ImageCacheDir) } +// CacheAndLoadImagesInConfig loads the images currently in the config file +// called by 'start' and 'cache reload' commands. +func CacheAndLoadImagesInConfig() error { + images, err := imagesInConfigFile() + if err != nil { + return err + } + if len(images) == 0 { + return nil + } + return machine.CacheAndLoadImages(images) +} + func imagesInConfigFile() ([]string, error) { configFile, err := config.ReadConfig(localpath.ConfigFile()) if err != nil { diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index 1d194ca900..931a21f812 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -79,7 +79,7 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str exit.WithError("Error starting cluster", err) } - if err := config.CacheAndLoadImagesInConfig(); err != nil { + if err := CacheAndLoadImagesInConfig(); err != nil { out.T(out.FailureType, "Unable to load cached images from config file.") } diff --git a/pkg/minikube/config/config.go b/pkg/minikube/config/config.go index c0ab5d0976..bf7e0bdd28 100644 --- a/pkg/minikube/config/config.go +++ b/pkg/minikube/config/config.go @@ -26,7 +26,6 @@ import ( "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/localpath" - "k8s.io/minikube/pkg/minikube/machine" ) const ( @@ -202,16 +201,3 @@ func (c *simpleConfigLoader) WriteConfigToFile(profileName string, cc *ClusterCo } return ioutil.WriteFile(path, contents, 0644) } - -// CacheAndLoadImagesInConfig loads the images currently in the config file -// called by 'start' and 'cache reload' commands. 
-func CacheAndLoadImagesInConfig() error { - images, err := imagesInConfigFile() - if err != nil { - return err - } - if len(images) == 0 { - return nil - } - return machine.CacheAndLoadImages(images) -} diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 1f2147cbc5..940661bc95 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -25,14 +25,13 @@ import ( "k8s.io/minikube/pkg/minikube/machine" ) +// TODO: Share these between cluster and node packages const ( - cacheImages = "cache-images" - waitUntilHealthy = "wait" - cacheImageConfigKey = "cache" - containerRuntime = "container-runtime" - mountString = "mount-string" - createMount = "mount" - waitTimeout = "wait-timeout" + waitUntilHealthy = "wait" + containerRuntime = "container-runtime" + mountString = "mount-string" + createMount = "mount" + waitTimeout = "wait-timeout" ) // Add adds a new node config to an existing cluster. diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 8ce658c836..f770b0c88d 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -32,7 +32,7 @@ import ( func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) error { // Now that the ISO is downloaded, pull images in the background while the VM boots. var cacheGroup errgroup.Group - beginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion) + cluster.BeginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion) // Why do we need this? if cc.Downloader == nil { @@ -50,11 +50,11 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo k8sVersion := cc.KubernetesConfig.KubernetesVersion driverName := cc.Driver // exits here in case of --download-only option. 
- handleDownloadOnly(&cacheGroup, k8sVersion) + cluster.HandleDownloadOnly(&cacheGroup, k8sVersion) // configure the runtime (docker, containerd, crio) cr := configureRuntimes(runner, driverName, cc.KubernetesConfig) showVersionInfo(k8sVersion, cr) - waitCacheRequiredImages(&cacheGroup) + cluster.WaitCacheRequiredImages(&cacheGroup) configureMounts() @@ -67,7 +67,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo exit.WithError("Failed to update node", err) } - if err := config.CacheAndLoadImagesInConfig(); err != nil { + if err := cluster.CacheAndLoadImagesInConfig(); err != nil { out.T(out.FailureType, "Unable to load cached images from config file.") } From 0cc0a25c28e46037d523a258e7021ba6a66b509e Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Thu, 27 Feb 2020 14:37:15 -0800 Subject: [PATCH 036/668] passing correct machine name around --- cmd/minikube/cmd/delete.go | 18 ++++++++++-------- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 6 +++--- pkg/minikube/cluster/setup.go | 4 ++++ 3 files changed, 17 insertions(+), 11 deletions(-) diff --git a/cmd/minikube/cmd/delete.go b/cmd/minikube/cmd/delete.go index 9169a4fa71..892a851c67 100644 --- a/cmd/minikube/cmd/delete.go +++ b/cmd/minikube/cmd/delete.go @@ -235,14 +235,16 @@ func deleteProfile(profile *pkg_config.Profile) error { out.T(out.FailureType, "Failed to kill mount process: {{.error}}", out.V{"error": err}) } - for _, n := range cc.Nodes { - if err = machine.DeleteHost(api, driver.MachineName(profile.Name, n.Name)); err != nil { - switch errors.Cause(err).(type) { - case mcnerror.ErrHostDoesNotExist: - glog.Infof("%s cluster does not exist. 
Proceeding ahead with cleanup.", profile.Name) - default: - out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err}) - out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": profile.Name}) + if cc != nil { + for _, n := range cc.Nodes { + if err = machine.DeleteHost(api, driver.MachineName(profile.Name, n.Name)); err != nil { + switch errors.Cause(err).(type) { + case mcnerror.ErrHostDoesNotExist: + glog.Infof("%s cluster does not exist. Proceeding ahead with cleanup.", profile.Name) + default: + out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err}) + out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": profile.Name}) + } } } } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 1316612771..14aed83c6c 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -268,7 +268,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time port := cp.Port if driver.IsKIC(cfg.Driver) { ip = oci.DefaultBindIPV4 - port, err = oci.HostPortBinding(cfg.Driver, cfg.Name, port) + port, err = oci.HostPortBinding(cfg.Driver, driver.MachineName(cfg.Name, n.Name), port) if err != nil { return errors.Wrapf(err, "get host-bind port %d for container %s", port, cfg.Name) } @@ -338,9 +338,9 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { port := n.Port if driver.IsKIC(cfg.Driver) { ip = oci.DefaultBindIPV4 - port, err = oci.HostPortBinding(cfg.Driver, cfg.Name, port) + port, err = oci.HostPortBinding(cfg.Driver, driver.MachineName(cfg.Name, n.Name), port) if err != nil { - return errors.Wrapf(err, "get host-bind port %d for container %s", port, cfg.Name) + return errors.Wrapf(err, "get host-bind port %d for container %s", port, driver.MachineName(cfg.Name, n.Name)) } } 
client, err := k.client(ip, port) diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index 931a21f812..62b57730b1 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -29,6 +29,7 @@ import ( "github.com/docker/machine/libmachine/host" "github.com/golang/glog" "github.com/spf13/viper" + "golang.org/x/sync/errgroup" cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/addons" "k8s.io/minikube/pkg/minikube/bootstrapper" @@ -57,6 +58,9 @@ const ( // InitialSetup performs all necessary operations on the initial control plane node when first spinning up a cluster func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) (*kubeconfig.Settings, error) { + var cacheGroup errgroup.Group + BeginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion) + _, preExists, machineAPI, host := StartMachine(&cc, &n) defer machineAPI.Close() From a349b865d83f514e11499a8c410ba24e93a28350 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Thu, 27 Feb 2020 15:09:05 -0800 Subject: [PATCH 037/668] correct machine name for selectDriver --- cmd/minikube/cmd/start.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 194c1c988a..0db8d68826 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -534,7 +534,11 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) { return } - machineName := viper.GetString(config.MachineProfile) + cp, err := config.PrimaryControlPlane(*existing) + if err != nil { + glog.Warningf("selectDriver PrimaryControlPlane: %v", err) + } + machineName := driver.MachineName(viper.GetString(config.MachineProfile), cp.Name) h, err := api.Load(machineName) if err != nil { glog.Warningf("selectDriver api.Load: %v", err) From 9f41fd42e1cd78a285383f494000bc6f95f94e1a Mon Sep 17 00:00:00 2001 From: Prasad Katti Date: Tue, 3 
Mar 2020 22:23:52 -0800 Subject: [PATCH 038/668] populate target port in service list --- cmd/minikube/cmd/service_list.go | 3 ++- pkg/minikube/service/service.go | 8 ++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/cmd/minikube/cmd/service_list.go b/cmd/minikube/cmd/service_list.go index 9d86d5a8b8..7dddcd826c 100644 --- a/cmd/minikube/cmd/service_list.go +++ b/cmd/minikube/cmd/service_list.go @@ -66,6 +66,7 @@ var serviceListCmd = &cobra.Command{ if len(serviceURL.URLs) == 0 { data = append(data, []string{serviceURL.Namespace, serviceURL.Name, "No node port"}) } else { + servicePortNames := strings.Join(serviceURL.PortNames, "\n") serviceURLs := strings.Join(serviceURL.URLs, "\n") // if we are running Docker on OSX we empty the internal service URLs @@ -73,7 +74,7 @@ var serviceListCmd = &cobra.Command{ serviceURLs = "" } - data = append(data, []string{serviceURL.Namespace, serviceURL.Name, "", serviceURLs}) + data = append(data, []string{serviceURL.Namespace, serviceURL.Name, servicePortNames, serviceURLs}) } } diff --git a/pkg/minikube/service/service.go b/pkg/minikube/service/service.go index 3f3c159cd4..16579e31b3 100644 --- a/pkg/minikube/service/service.go +++ b/pkg/minikube/service/service.go @@ -22,6 +22,7 @@ import ( "io" "net/url" "os" + "strconv" "strings" "text/template" "time" @@ -196,6 +197,13 @@ func printURLsForService(c typed_core.CoreV1Interface, ip, service, namespace st urls := []string{} portNames := []string{} for _, port := range svc.Spec.Ports { + + if port.Name != "" { + m[port.TargetPort.IntVal] = port.Name + } else { + m[port.TargetPort.IntVal] = strconv.Itoa(int(port.Port)) + } + if port.NodePort > 0 { var doc bytes.Buffer err = t.Execute(&doc, struct { From d19f27a3c293ca63b42ad550d39e9faa4f57abb4 Mon Sep 17 00:00:00 2001 From: Prasad Katti Date: Wed, 4 Mar 2020 13:48:23 -0800 Subject: [PATCH 039/668] Add name/port instead of just the name for the target port --- pkg/minikube/service/service.go | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/service/service.go b/pkg/minikube/service/service.go index 16579e31b3..3cd16f79e7 100644 --- a/pkg/minikube/service/service.go +++ b/pkg/minikube/service/service.go @@ -199,7 +199,7 @@ func printURLsForService(c typed_core.CoreV1Interface, ip, service, namespace st for _, port := range svc.Spec.Ports { if port.Name != "" { - m[port.TargetPort.IntVal] = port.Name + m[port.TargetPort.IntVal] = fmt.Sprintf("%s/%d", port.Name, port.Port) } else { m[port.TargetPort.IntVal] = strconv.Itoa(int(port.Port)) } From b7463fda7eda282d3b806c6a6c35b2ff2841301c Mon Sep 17 00:00:00 2001 From: Prasad Katti Date: Wed, 4 Mar 2020 22:38:37 -0800 Subject: [PATCH 040/668] fix minikube service tests --- pkg/minikube/service/service_test.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pkg/minikube/service/service_test.go b/pkg/minikube/service/service_test.go index 47fd341f42..59a32f8454 100644 --- a/pkg/minikube/service/service_test.go +++ b/pkg/minikube/service/service_test.go @@ -134,13 +134,17 @@ var defaultNamespaceServiceInterface = &MockServiceInterface{ Spec: core.ServiceSpec{ Ports: []core.ServicePort{ { + Name: "port1", NodePort: int32(1111), + Port: int32(11111), TargetPort: intstr.IntOrString{ IntVal: int32(11111), }, }, { + Name: "port2", NodePort: int32(2222), + Port: int32(22222), TargetPort: intstr.IntOrString{ IntVal: int32(22222), }, @@ -324,7 +328,7 @@ func TestPrintURLsForService(t *testing.T) { serviceName: "mock-dashboard", namespace: "default", tmpl: template.Must(template.New("svc-arbitrary-template").Parse("{{.Name}}={{.IP}}:{{.Port}}")), - expectedOutput: []string{"port1=127.0.0.1:1111", "port2=127.0.0.1:2222"}, + expectedOutput: []string{"port1/11111=127.0.0.1:1111", "port2/22222=127.0.0.1:2222"}, }, { description: "empty slice for no node ports", @@ -452,7 +456,7 @@ func TestGetServiceURLs(t *testing.T) { Namespace: "default", Name: "mock-dashboard", URLs: 
[]string{"http://127.0.0.1:1111", "http://127.0.0.1:2222"}, - PortNames: []string{"port1", "port2"}, + PortNames: []string{"port1/11111", "port2/22222"}, }, { Namespace: "default", From 5a32eb7c4ca0cee17327c0ba573988413f335496 Mon Sep 17 00:00:00 2001 From: rajula96reddy Date: Wed, 6 Nov 2019 16:52:23 +0530 Subject: [PATCH 041/668] Updated error handling when service or namespace doesn't exist --- cmd/minikube/cmd/service.go | 2 +- pkg/minikube/service/service.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/minikube/cmd/service.go b/cmd/minikube/cmd/service.go index 19827913d9..612d45be4e 100644 --- a/cmd/minikube/cmd/service.go +++ b/cmd/minikube/cmd/service.go @@ -104,7 +104,7 @@ var serviceCmd = &cobra.Command{ urls, err := service.WaitForService(api, namespace, svc, serviceURLTemplate, serviceURLMode, https, wait, interval) if err != nil { - exit.WithError("Error opening service", err) + exit.WithCodeT(exit.Data, fmt.Sprintf("Error opening service: %s", err)) } openURLs(svc, urls) diff --git a/pkg/minikube/service/service.go b/pkg/minikube/service/service.go index 2d349d7a3b..b13157d538 100644 --- a/pkg/minikube/service/service.go +++ b/pkg/minikube/service/service.go @@ -276,7 +276,7 @@ func WaitForService(api libmachine.API, namespace string, service string, urlTem chkSVC := func() error { return CheckService(namespace, service) } if err := retry.Expo(chkSVC, time.Duration(interval)*time.Second, time.Duration(wait)*time.Second); err != nil { - return urlList, errors.Wrapf(err, "Service %s was not found in %q namespace. You may select another namespace by using 'minikube service %s -n ", service, namespace, service) + return urlList, errors.Wrapf(err, "Service %s was not found in %q namespace. You may select another namespace by using 'minikube service %s -n '. 
Or list out all the services using 'minikube service list'", service, namespace, service) } serviceURL, err := GetServiceURLsForService(api, namespace, service, urlTemplate) From 061b15999193f0898cdcf97317502e86713f7c90 Mon Sep 17 00:00:00 2001 From: rajula96reddy Date: Thu, 7 Nov 2019 12:08:25 +0530 Subject: [PATCH 042/668] Formatted the error output --- cmd/minikube/cmd/service.go | 2 +- pkg/minikube/service/service.go | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/minikube/cmd/service.go b/cmd/minikube/cmd/service.go index 612d45be4e..321b7ccbc9 100644 --- a/cmd/minikube/cmd/service.go +++ b/cmd/minikube/cmd/service.go @@ -104,7 +104,7 @@ var serviceCmd = &cobra.Command{ urls, err := service.WaitForService(api, namespace, svc, serviceURLTemplate, serviceURLMode, https, wait, interval) if err != nil { - exit.WithCodeT(exit.Data, fmt.Sprintf("Error opening service: %s", err)) + exit.WithCodeT(exit.Data, `Error opening service: {{.error}}`, out.V{"error": err}) } openURLs(svc, urls) diff --git a/pkg/minikube/service/service.go b/pkg/minikube/service/service.go index b13157d538..6e9b6e97cf 100644 --- a/pkg/minikube/service/service.go +++ b/pkg/minikube/service/service.go @@ -276,7 +276,8 @@ func WaitForService(api libmachine.API, namespace string, service string, urlTem chkSVC := func() error { return CheckService(namespace, service) } if err := retry.Expo(chkSVC, time.Duration(interval)*time.Second, time.Duration(wait)*time.Second); err != nil { - return urlList, errors.Wrapf(err, "Service %s was not found in %q namespace. You may select another namespace by using 'minikube service %s -n '. Or list out all the services using 'minikube service list'", service, namespace, service) + return urlList, errors.Errorf(`Service %s was not found in %q namespace. +You may select another namespace by using 'minikube service %s -n '. 
Or list out all the services using 'minikube service list'`, service, namespace, service) } serviceURL, err := GetServiceURLsForService(api, namespace, service, urlTemplate) From 8b2289836a20972a6c8a9d86e030763483c61222 Mon Sep 17 00:00:00 2001 From: rajula96reddy Date: Wed, 18 Dec 2019 16:11:46 +0530 Subject: [PATCH 043/668] Modified the previous commit using Erros.New() function --- cmd/minikube/cmd/service.go | 6 +++++- pkg/minikube/service/service.go | 3 +-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/cmd/minikube/cmd/service.go b/cmd/minikube/cmd/service.go index 321b7ccbc9..d138c73af3 100644 --- a/cmd/minikube/cmd/service.go +++ b/cmd/minikube/cmd/service.go @@ -104,7 +104,11 @@ var serviceCmd = &cobra.Command{ urls, err := service.WaitForService(api, namespace, svc, serviceURLTemplate, serviceURLMode, https, wait, interval) if err != nil { - exit.WithCodeT(exit.Data, `Error opening service: {{.error}}`, out.V{"error": err}) + if err.Error() == "Service not found in namespace" { + exit.WithCodeT(exit.Data, `Service '{{.service}}' was not found in '{{.namespace}}' namespace. +You may select another namespace by using 'minikube service {{.service}} -n '. Or list out all the services using 'minikube service list'`, out.V{"service": svc, "namespace": namespace}) + } + exit.WithError("Error opening service", err) } openURLs(svc, urls) diff --git a/pkg/minikube/service/service.go b/pkg/minikube/service/service.go index 6e9b6e97cf..d7ff77a37b 100644 --- a/pkg/minikube/service/service.go +++ b/pkg/minikube/service/service.go @@ -276,8 +276,7 @@ func WaitForService(api libmachine.API, namespace string, service string, urlTem chkSVC := func() error { return CheckService(namespace, service) } if err := retry.Expo(chkSVC, time.Duration(interval)*time.Second, time.Duration(wait)*time.Second); err != nil { - return urlList, errors.Errorf(`Service %s was not found in %q namespace. -You may select another namespace by using 'minikube service %s -n '. 
Or list out all the services using 'minikube service list'`, service, namespace, service) + return nil, errors.New("Service not found in namespace") } serviceURL, err := GetServiceURLsForService(api, namespace, service, urlTemplate) From 40c1c80b42713118f441c0185983bffe3da576bf Mon Sep 17 00:00:00 2001 From: rajula96reddy Date: Tue, 14 Jan 2020 16:16:36 +0530 Subject: [PATCH 044/668] Created new error type & made corresponding changes --- cmd/minikube/cmd/service.go | 12 +++++++++++- pkg/minikube/service/service.go | 23 +++++++++++++++++++++-- 2 files changed, 32 insertions(+), 3 deletions(-) diff --git a/cmd/minikube/cmd/service.go b/cmd/minikube/cmd/service.go index d138c73af3..7a8f37cdc5 100644 --- a/cmd/minikube/cmd/service.go +++ b/cmd/minikube/cmd/service.go @@ -17,6 +17,7 @@ limitations under the License. package cmd import ( + "errors" "fmt" "net/url" "os" @@ -57,6 +58,14 @@ var ( interval int ) +// type serviceNotFoundError struct { +// Err error +// } + +// func (t serviceNotFoundError) Error() string { +// return "Service not found: " + t.Err.Error() +// } + // serviceCmd represents the service command var serviceCmd = &cobra.Command{ Use: "service [flags] SERVICE", @@ -104,7 +113,8 @@ var serviceCmd = &cobra.Command{ urls, err := service.WaitForService(api, namespace, svc, serviceURLTemplate, serviceURLMode, https, wait, interval) if err != nil { - if err.Error() == "Service not found in namespace" { + var s *service.SVCNotFoundError + if errors.As(err, &s) { exit.WithCodeT(exit.Data, `Service '{{.service}}' was not found in '{{.namespace}}' namespace. You may select another namespace by using 'minikube service {{.service}} -n '. 
Or list out all the services using 'minikube service list'`, out.V{"service": svc, "namespace": namespace}) } diff --git a/pkg/minikube/service/service.go b/pkg/minikube/service/service.go index d7ff77a37b..e077f7f7be 100644 --- a/pkg/minikube/service/service.go +++ b/pkg/minikube/service/service.go @@ -22,6 +22,7 @@ import ( "io" "net/url" "os" + "sort" "strings" "text/template" "time" @@ -264,19 +265,37 @@ func PrintServiceList(writer io.Writer, data [][]string) { table.Render() } +// SVCNotFoundError error type handles 'service not found' scenarios +type SVCNotFoundError struct { + Err error +} + +// Error method for SVCNotFoundError type +func (t SVCNotFoundError) Error() string { + return "Service not found" +} + // WaitForService waits for a service, and return the urls when available func WaitForService(api libmachine.API, namespace string, service string, urlTemplate *template.Template, urlMode bool, https bool, wait int, interval int) ([]string, error) { - var urlList []string // Convert "Amount of time to wait" and "interval of each check" to attempts if interval == 0 { interval = 1 } + services, err := GetServiceURLs(api, namespace, urlTemplate) + if err != nil { + return nil, err + } + searchServices := sort.Search(len(services), func(i int) bool { return services[i].Name == service }) + if searchServices == len(services) { + return nil, &SVCNotFoundError{err} + } + chkSVC := func() error { return CheckService(namespace, service) } if err := retry.Expo(chkSVC, time.Duration(interval)*time.Second, time.Duration(wait)*time.Second); err != nil { - return nil, errors.New("Service not found in namespace") + return nil, &SVCNotFoundError{err} } serviceURL, err := GetServiceURLsForService(api, namespace, service, urlTemplate) From 5d8d3f01b5e1b13226e07c3cff1226e25ace438f Mon Sep 17 00:00:00 2001 From: rajula96reddy Date: Wed, 5 Feb 2020 00:48:00 +0530 Subject: [PATCH 045/668] Modified 'checking if service exists before retry' logic --- 
pkg/minikube/service/service.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/pkg/minikube/service/service.go b/pkg/minikube/service/service.go index e077f7f7be..26be52d267 100644 --- a/pkg/minikube/service/service.go +++ b/pkg/minikube/service/service.go @@ -22,7 +22,6 @@ import ( "io" "net/url" "os" - "sort" "strings" "text/template" "time" @@ -283,12 +282,9 @@ func WaitForService(api libmachine.API, namespace string, service string, urlTem if interval == 0 { interval = 1 } - services, err := GetServiceURLs(api, namespace, urlTemplate) + + err := CheckService(namespace, service) if err != nil { - return nil, err - } - searchServices := sort.Search(len(services), func(i int) bool { return services[i].Name == service }) - if searchServices == len(services) { return nil, &SVCNotFoundError{err} } From 3ffdaf3c1c6c674e439f3fc5ecebcc196da93d41 Mon Sep 17 00:00:00 2001 From: rajula96reddy Date: Fri, 7 Feb 2020 09:14:09 +0530 Subject: [PATCH 046/668] Deleted unnecessary comments --- cmd/minikube/cmd/service.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/cmd/minikube/cmd/service.go b/cmd/minikube/cmd/service.go index 7a8f37cdc5..736434dc0d 100644 --- a/cmd/minikube/cmd/service.go +++ b/cmd/minikube/cmd/service.go @@ -58,14 +58,6 @@ var ( interval int ) -// type serviceNotFoundError struct { -// Err error -// } - -// func (t serviceNotFoundError) Error() string { -// return "Service not found: " + t.Err.Error() -// } - // serviceCmd represents the service command var serviceCmd = &cobra.Command{ Use: "service [flags] SERVICE", From 70ca2de554b0c1474644f74e207c26fa63b22849 Mon Sep 17 00:00:00 2001 From: Kamesh Sampath Date: Sun, 16 Feb 2020 19:18:45 +0530 Subject: [PATCH 047/668] (fix) Create addon to configure registry helper - Service Account and binding to run the job - Registry aliases ConfigMap - Registry aliases daemonset to update the node etc/hosts fixes: 4604 Signed-off-by: Kamesh Sampath --- .../node-etc-hosts-update.tmpl | 
51 +++++++++++++++++++ .../registry-aliases/patch-coredns-job.tmpl | 26 ++++++++++ .../registry-aliases-config.tmpl | 18 +++++++ .../registry-aliases-sa-crb.tmpl | 12 +++++ .../registry-aliases/registry-aliases-sa.tmpl | 5 ++ pkg/addons/config.go | 7 +++ pkg/minikube/assets/addons.go | 32 ++++++++++++ 7 files changed, 151 insertions(+) create mode 100644 deploy/addons/registry-aliases/node-etc-hosts-update.tmpl create mode 100644 deploy/addons/registry-aliases/patch-coredns-job.tmpl create mode 100644 deploy/addons/registry-aliases/registry-aliases-config.tmpl create mode 100644 deploy/addons/registry-aliases/registry-aliases-sa-crb.tmpl create mode 100644 deploy/addons/registry-aliases/registry-aliases-sa.tmpl diff --git a/deploy/addons/registry-aliases/node-etc-hosts-update.tmpl b/deploy/addons/registry-aliases/node-etc-hosts-update.tmpl new file mode 100644 index 0000000000..0ef938876b --- /dev/null +++ b/deploy/addons/registry-aliases/node-etc-hosts-update.tmpl @@ -0,0 +1,51 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: registry-aliases-hosts-update + namespace: kube-system + labels: + kubernetes.io/minikube-addons: registry-aliases + addonmanager.kubernetes.io/mode: Reconcile +spec: + selector: + matchLabels: + app: registry-aliases-hosts-update + template: + metadata: + labels: + app: registry-aliases-hosts-update + spec: + initContainers: + - name: update + image: registry.fedoraproject.org/fedora + volumeMounts: + - name: etchosts + mountPath: /host-etc/hosts + readOnly: false + env: + - name: REGISTRY_ALIASES + valueFrom: + configMapKeyRef: + name: registry-aliases + key: registryAliases + command: + - bash + - -ce + - | + NL=$'\n' + TAB=$'\t' + HOSTS="$(cat /host-etc/hosts)" + [ -z "$REGISTRY_SERVICE_HOST" ] && echo "Failed to get hosts entry for default registry" && exit 1; + for H in $REGISTRY_ALIASES; do + echo "$HOSTS" | grep "$H" || HOSTS="$HOSTS$NL$REGISTRY_SERVICE_HOST$TAB$H"; + done; + echo "$HOSTS" | diff -u /host-etc/hosts - || 
echo "$HOSTS" > /host-etc/hosts + echo "Done." + containers: + - name: pause-for-update + image: gcr.io/google_containers/pause-amd64:3.1 + terminationGracePeriodSeconds: 30 + volumes: + - name: etchosts + hostPath: + path: /etc/hosts diff --git a/deploy/addons/registry-aliases/patch-coredns-job.tmpl b/deploy/addons/registry-aliases/patch-coredns-job.tmpl new file mode 100644 index 0000000000..cdda3bc7e5 --- /dev/null +++ b/deploy/addons/registry-aliases/patch-coredns-job.tmpl @@ -0,0 +1,26 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: registry-aliases-patch-core-dns + namespace: kube-system +spec: + ttlSecondsAfterFinished: 100 + template: + spec: + serviceAccountName: registry-aliases-sa + volumes: + - name: minikube + hostPath: + path: /var/lib/minikube/binaries + containers: + - name: core-dns-patcher + image: quay.io/rhdevelopers/core-dns-patcher + imagePullPolicy: IfNotPresent + # using the kubectl from the minikube instance + volumeMounts: + - mountPath: /var/lib/minikube/binaries + name: minikube + readOnly: true + restartPolicy: Never + backoffLimit: 4 \ No newline at end of file diff --git a/deploy/addons/registry-aliases/registry-aliases-config.tmpl b/deploy/addons/registry-aliases/registry-aliases-config.tmpl new file mode 100644 index 0000000000..0dacc4ed9c --- /dev/null +++ b/deploy/addons/registry-aliases/registry-aliases-config.tmpl @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: registry-aliases + namespace: kube-system + labels: + kubernetes.io/minikube-addons: registry-aliases + addonmanager.kubernetes.io/mode: Reconcile +data: + # Add additonal hosts seperated by new-line + registryAliases: >- + example.org + example.com + test.com + test.org + # default registry address in minikube when enabled via minikube addons enable registry + registrySvc: registry.kube-system.svc.cluster.local + diff --git a/deploy/addons/registry-aliases/registry-aliases-sa-crb.tmpl 
b/deploy/addons/registry-aliases/registry-aliases-sa-crb.tmpl new file mode 100644 index 0000000000..1ca1b60cc8 --- /dev/null +++ b/deploy/addons/registry-aliases/registry-aliases-sa-crb.tmpl @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: registry-aliases-crb +subjects: +- kind: ServiceAccount + name: registry-aliases-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/deploy/addons/registry-aliases/registry-aliases-sa.tmpl b/deploy/addons/registry-aliases/registry-aliases-sa.tmpl new file mode 100644 index 0000000000..a40fc37999 --- /dev/null +++ b/deploy/addons/registry-aliases/registry-aliases-sa.tmpl @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: registry-aliases-sa + namespace: kube-system \ No newline at end of file diff --git a/pkg/addons/config.go b/pkg/addons/config.go index 46c713d69f..f3ed918240 100644 --- a/pkg/addons/config.go +++ b/pkg/addons/config.go @@ -112,6 +112,13 @@ var Addons = []*Addon{ set: SetBool, callbacks: []setFn{enableOrDisableAddon}, }, + { + name: "registry-aliases", + set: SetBool, + callbacks: []setFn{enableOrDisableAddon}, + //TODO - add other settings + //TODO check if registry addon is enabled + }, { name: "storage-provisioner", set: SetBool, diff --git a/pkg/minikube/assets/addons.go b/pkg/minikube/assets/addons.go index b25446e8f6..4e20974058 100644 --- a/pkg/minikube/assets/addons.go +++ b/pkg/minikube/assets/addons.go @@ -246,6 +246,38 @@ var Addons = map[string]*Addon{ "0640", false), }, false, "registry-creds"), + "registry-aliases": NewAddon([]*BinAsset{ + MustBinAsset( + "deploy/addons/registry-aliases/registry-aliases-sa.tmpl", + vmpath.GuestAddonsDir, + "registry-aliases-sa.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/registry-aliases/registry-aliases-sa-crb.tmpl", + vmpath.GuestAddonsDir, + 
"registry-aliases-sa-crb.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/registry-aliases/registry-aliases-config.tmpl", + vmpath.GuestAddonsDir, + "registry-aliases-config.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/registry-aliases/node-etc-hosts-update.tmpl", + vmpath.GuestAddonsDir, + "node-etc-hosts-update.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/registry-aliases/patch-coredns-job.tmpl", + vmpath.GuestAddonsDir, + "patch-coredns-job.yaml", + "0640", + false), + }, false, "registry-aliases"), "freshpod": NewAddon([]*BinAsset{ MustBinAsset( "deploy/addons/freshpod/freshpod-rc.yaml.tmpl", From 28ffcb13a1e3e4dde9e6dd70d0af867bc04bd711 Mon Sep 17 00:00:00 2001 From: Kamesh Sampath Date: Thu, 20 Feb 2020 19:32:19 +0530 Subject: [PATCH 048/668] updated readme with usage instructions Signed-off-by: Kamesh Sampath --- deploy/addons/registry-aliases/README.md | 150 +++++++++++++++++++++++ 1 file changed, 150 insertions(+) create mode 100644 deploy/addons/registry-aliases/README.md diff --git a/deploy/addons/registry-aliases/README.md b/deploy/addons/registry-aliases/README.md new file mode 100644 index 0000000000..a111524bde --- /dev/null +++ b/deploy/addons/registry-aliases/README.md @@ -0,0 +1,150 @@ +# Minikube Registry Aliases Addon + +An addon to minikube that can help push and pull from the minikube registry using custom domain names. The custom domain names will be made resolveable from with in cluster and at minikube node. + +## How to use ? 
+ +### Start minikube + +```shell +minikube profile demo +minikube start -p demo +``` +This addon depends on `registry` addon, it need to be enabled before the alias addon is installed: + +### Enable internal registry + +```shell +minikube addons enable registry +``` + +Verifying the registry deployment + +```shell +watch kubectl get pods -n kube-system +``` + +```shell +NAME READY STATUS RESTARTS AGE +coredns-6955765f44-kpbzt 1/1 Running 0 16m +coredns-6955765f44-lzlsv 1/1 Running 0 16m +etcd-demo 1/1 Running 0 16m +kube-apiserver-demo 1/1 Running 0 16m +kube-controller-manager-demo 1/1 Running 0 16m +kube-proxy-q8rb9 1/1 Running 0 16m +kube-scheduler-demo 1/1 Running 0 16m +*registry-4k8zs* 1/1 Running 0 40s +registry-proxy-vs8jt 1/1 Running 0 40s +storage-provisioner 1/1 Running 0 16m +``` + +```shell +kubectl get svc -n kube-system +``` + +```shell +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP,9153/TCP 17m +registry ClusterIP 10.97.247.75 80/TCP 94s +``` + +> +> **NOTE:** +> Please make a note of the CLUSTER-IP of `registry` service + +### Enable registry aliases addon + +```shell +minikube addons enable registry-aliases +🌟 The 'registry-aliases' addon is enabled +``` + +You can check the mikikube vm's `/etc/hosts` file for the registry aliases entries: + +```shell +watch minikube ssh -- cat /etc/hosts +``` + +```shell +127.0.0.1 localhost +127.0.1.1 demo +10.97.247.75 example.org +10.97.247.75 example.com +10.97.247.75 test.com +10.97.247.75 test.org +``` + +The above output shows that the Daemonset has added the `registryAliases` from the ConfigMap pointing to the internal registry's __CLUSTER-IP__. + +### Update CoreDNS + +The coreDNS would have been automatically updated by the patch-coredns. 
A successful job run will have coredns ConfigMap updated like: + +```yaml +apiVersion: v1 +data: + Corefile: |- + .:53 { + errors + health + rewrite name example.com registry.kube-system.svc.cluster.local + rewrite name example.org registry.kube-system.svc.cluster.local + rewrite name test.com registry.kube-system.svc.cluster.local + rewrite name test.org registry.kube-system.svc.cluster.local + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + upstream + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + proxy . /etc/resolv.conf + cache 30 + loop + reload + loadbalance + } +kind: ConfigMap +metadata: + name: coredns +``` + +To verify it run the following command: + +```shell +kubectl get cm -n kube-system coredns -o yaml +``` + +Once you have successfully patched you can now push and pull from the registry using suffix `example.com`, `example.org`,`test.com` and `test.org`. + +The successful run will show the following extra pods (Daemonset, Job) in `kube-system` namespace: + +```shell +NAME READY STATUS RESTARTS AGE +registry-aliases-hosts-update-995vx 1/1 Running 0 47s +registry-aliases-patch-core-dns-zsxfc 0/1 Completed 0 47s +``` + +## Verify with sample application + +You can verify the deployment end to end using the example [application](https://github.com/kameshsampath/minikube-registry-aliases-demo). 
+ +```shell +git clone https://github.com/kameshsampath/minikube-registry-aliases-demo +cd minikube-registry-aliases-demo +``` + +Make sure you set the docker context using `eval $(minikube -p demo docker-env)` + +Deploy the application using [Skaffold](https://skaffold.dev): + +```shell +skaffold dev --port-forward +``` + +Once the application is running try doing `curl localhost:8080` to see the `Hello World` response + +You can also update [skaffold.yaml](./skaffold.yaml) and [app.yaml](.k8s/app.yaml), to use `test.org`, `test.com` or `example.org` as container registry urls, and see all the container image names resolves to internal registry, resulting in successful build and deployment. + +> **NOTE**: +> +> You can also update [skaffold.yaml](./skaffold.yaml) and [app. yaml](.k8s/app.yaml), to use `test.org`, `test.com` or > `example.org` as container registry urls, and see all the > container image names resolves to internal registry, resulting in successful build and deployment. From db27b59e075180085b199948855889179707aa2c Mon Sep 17 00:00:00 2001 From: Kamesh Sampath Date: Sun, 8 Mar 2020 09:36:21 +0530 Subject: [PATCH 049/668] (chore) fix README to use new profile format --- deploy/addons/registry-aliases/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/addons/registry-aliases/README.md b/deploy/addons/registry-aliases/README.md index a111524bde..1cebc54759 100644 --- a/deploy/addons/registry-aliases/README.md +++ b/deploy/addons/registry-aliases/README.md @@ -7,7 +7,6 @@ An addon to minikube that can help push and pull from the minikube registry usin ### Start minikube ```shell -minikube profile demo minikube start -p demo ``` This addon depends on `registry` addon, it need to be enabled before the alias addon is installed: From 05116abb1fedc501788a5cd80704d803329a05b8 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 9 Mar 2020 19:29:29 -0700 Subject: [PATCH 050/668] more fallout for the merge-pocalypse --- 
cmd/minikube/cmd/node_add.go | 12 +++++++----- cmd/minikube/cmd/ssh.go | 8 ++++++-- cmd/minikube/cmd/start.go | 2 +- pkg/minikube/cluster/cache.go | 3 ++- pkg/minikube/cluster/setup.go | 4 ++-- pkg/minikube/node/start.go | 12 +++--------- 6 files changed, 21 insertions(+), 20 deletions(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 3995e7a6d7..9247d872ed 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -43,17 +43,19 @@ var nodeAddCmd = &cobra.Command{ exit.WithError("Error getting config", err) } - //name := profile + strconv.Itoa(len(mc.Nodes)+1) name := fmt.Sprintf("m%d", len(cc.Nodes)+1) out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile}) - n, err := node.Add(cc, name, cp, worker, "", profile) - if err != nil { - exit.WithError("Error adding node to cluster", err) + // TODO: Deal with parameters better. Ideally we should be able to acceot any node-specific minikube start params here. 
+ n := config.Node{ + Name: name, + Worker: worker, + ControlPlane: cp, + KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, } - _, err = node.Start(*cc, *n, false, nil) + err = node.Add(cc, n) if err != nil { exit.WithError("Error adding node to cluster", err) } diff --git a/cmd/minikube/cmd/ssh.go b/cmd/minikube/cmd/ssh.go index 9da3698ba9..9c41e8c275 100644 --- a/cmd/minikube/cmd/ssh.go +++ b/cmd/minikube/cmd/ssh.go @@ -58,7 +58,11 @@ var sshCmd = &cobra.Command{ } n = &cp } else { - n = node.Retrieve(cc, nodeName) + n, _, err = node.Retrieve(cc, nodeName) + if err != nil { + out.FailureT("Node {{.nodeName}} does not exist.", out.V{"nodeName": nodeName}) + exit.WithError("", err) + } } host, err := machine.CheckIfHostExistsAndLoad(api, driver.MachineName(*cc, *n)) if err != nil { @@ -73,7 +77,7 @@ var sshCmd = &cobra.Command{ ssh.SetDefaultClient(ssh.External) } - err = machine.CreateSSHShell(api, *cc, cp, args) + err = machine.CreateSSHShell(api, *cc, *n, args) if err != nil { // This is typically due to a non-zero exit code, so no need for flourish. out.ErrLn("ssh: %v", err) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 0fd8f1c0c0..ba0d393f46 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -348,7 +348,7 @@ func runStart(cmd *cobra.Command, args []string) { // Abstraction leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot. // Hence, saveConfig must be called before startHost, and again afterwards when we know the IP. 
- if err := config.SaveProfile(viper.GetString(config.MachineProfile), &mc); err != nil { + if err := config.SaveProfile(viper.GetString(config.ProfileName), &mc); err != nil { exit.WithError("Failed to save config", err) } diff --git a/pkg/minikube/cluster/cache.go b/pkg/minikube/cluster/cache.go index e918eb454e..37fb7ae7cd 100644 --- a/pkg/minikube/cluster/cache.go +++ b/pkg/minikube/cluster/cache.go @@ -51,7 +51,8 @@ func BeginCacheRequiredImages(g *errgroup.Group, imageRepository string, k8sVers }) } -func handleDownloadOnly(cacheGroup, kicGroup *errgroup.Group, k8sVersion string) { +// HandleDownloadOnly caches appropariate binaries and images +func HandleDownloadOnly(cacheGroup, kicGroup *errgroup.Group, k8sVersion string) { // If --download-only, complete the remaining downloads and exit. if !viper.GetBool("download-only") { return diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index 62b57730b1..8b1af87a55 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -89,7 +89,7 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str // enable addons, both old and new! if existingAddons != nil { - addons.Start(viper.GetString(config.MachineProfile), existingAddons, config.AddonList) + addons.Start(viper.GetString(config.ProfileName), existingAddons, config.AddonList) } // special ops for none , like change minikube directory. 
@@ -111,7 +111,7 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str // setupKubeAdm adds any requested files into the VM before Kubernetes is started func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) bootstrapper.Bootstrapper { - bs, err := Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg.Name, n.Name) + bs, err := Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg, n) if err != nil { exit.WithError("Failed to get bootstrapper", err) } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 3e88a23041..2c7eb18004 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -25,24 +25,18 @@ import ( "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/out" - "k8s.io/minikube/pkg/util" ) // Start spins up a guest and starts the kubernetes node. func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) error { // Now that the ISO is downloaded, pull images in the background while the VM boots. - var cacheGroup errgroup.Group + var cacheGroup, kicGroup errgroup.Group cluster.BeginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion) - // Why do we need this? - if cc.Downloader == nil { - cc.Downloader = util.DefaultDownloader{} - } - runner, preExists, mAPI, _ := cluster.StartMachine(&cc, &n) defer mAPI.Close() - bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cc.Name, n.Name) + bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cc, n) if err != nil { exit.WithError("Failed to get bootstrapper", err) } @@ -50,7 +44,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo k8sVersion := cc.KubernetesConfig.KubernetesVersion driverName := cc.Driver // exits here in case of --download-only option. 
- cluster.HandleDownloadOnly(&cacheGroup, k8sVersion) + cluster.HandleDownloadOnly(&cacheGroup, &kicGroup, k8sVersion) // configure the runtime (docker, containerd, crio) cr := configureRuntimes(runner, driverName, cc.KubernetesConfig) showVersionInfo(k8sVersion, cr) From ef93b291ca278d8654c29e7809098606ee441f7d Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 10 Mar 2020 13:46:06 -0700 Subject: [PATCH 051/668] fix build failures --- pkg/minikube/machine/cluster_test.go | 2 -- pkg/minikube/node/node.go | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index 2f7cd5b15b..e1f7d5a390 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -108,8 +108,6 @@ func TestStartHostExists(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - n := defaultNodeConfig - // Create an initial host. ih, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) if err != nil { diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index fb12bfca75..1b86069ee9 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -32,6 +32,7 @@ const ( mountString = "mount-string" createMount = "mount" waitTimeout = "wait-timeout" + imageRepository = "image-repository" ) // Add adds a new node config to an existing cluster. 
From 63dc9385047355283daf9f3434b582b22395edd1 Mon Sep 17 00:00:00 2001 From: sayboras Date: Mon, 9 Mar 2020 18:32:27 +1100 Subject: [PATCH 052/668] Update KUBECONFIG for change non user test Install lz4 in ubuntu --- .github/workflows/main.yml | 25 +++++++++++++++++++++++++ test/integration/none_test.go | 8 +++++++- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index a3572ed151..a2daefe32d 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -88,6 +88,11 @@ jobs: docker info || true docker version || true docker ps || true + - name: install lz4 + shell: bash + run: | + sudo apt-get update -qq + sudo apt-get -qq -y install liblz4-tool - name: Install gopogh shell: bash run: | @@ -150,6 +155,11 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 needs: [build_minikube] steps: + - name: install lz4 + shell: bash + run: | + sudo apt-get update -qq + sudo apt-get -qq -y install liblz4-tool - name: Docker Info shell: bash run: | @@ -218,6 +228,11 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-16.04 steps: + - name: install lz4 + shell: bash + run: | + sudo apt-get update -qq + sudo apt-get -qq -y install liblz4-tool - name: Install gopogh shell: bash run: | @@ -280,6 +295,11 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-18.04 steps: + - name: install lz4 + shell: bash + run: | + sudo apt-get update -qq + sudo apt-get -qq -y install liblz4-tool - name: Install gopogh shell: bash run: | @@ -342,6 +362,11 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-18.04 steps: + - name: install lz4 + shell: bash + run: | + sudo apt-get update -qq + sudo apt-get -qq -y install liblz4-tool - name: install podman shell: bash run: | diff --git a/test/integration/none_test.go 
b/test/integration/none_test.go index e95468f86c..873465d5ef 100644 --- a/test/integration/none_test.go +++ b/test/integration/none_test.go @@ -77,7 +77,13 @@ func TestChangeNoneUser(t *testing.T) { t.Errorf("Failed to convert uid to int: %v", err) } - for _, p := range []string{localpath.MiniPath(), filepath.Join(u.HomeDir, ".kube/config")} { + // Retrieve the kube config from env + kubeConfig := os.Getenv("KUBECONFIG") + if kubeConfig == "" { + kubeConfig = filepath.Join(u.HomeDir, ".kube/config") + } + + for _, p := range []string{localpath.MiniPath(), kubeConfig} { info, err := os.Stat(p) if err != nil { t.Errorf("stat(%s): %v", p, err) From f5de2ff7c05f5175af8b77ec926c4fde44afc579 Mon Sep 17 00:00:00 2001 From: RA489 Date: Wed, 19 Feb 2020 16:00:56 +0530 Subject: [PATCH 053/668] Improve docker performance if it is too slow --- site/content/en/docs/Reference/Drivers/docker.md | 1 + 1 file changed, 1 insertion(+) diff --git a/site/content/en/docs/Reference/Drivers/docker.md b/site/content/en/docs/Reference/Drivers/docker.md index 164691d267..f44261fbad 100644 --- a/site/content/en/docs/Reference/Drivers/docker.md +++ b/site/content/en/docs/Reference/Drivers/docker.md @@ -28,3 +28,4 @@ As an experimental driver, not all commands are supported on all platforms. 
Nota ## Troubleshooting * Run `minikube start --alsologtostderr -v=1` to debug crashes +* If your docker is too slow on mac os try [Improving docker performance](https://docs.docker.com/docker-for-mac/osxfs-caching/) From 3ed818c48860e6777a559f14f30a8a029d5357a9 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Thu, 12 Mar 2020 15:40:13 -0700 Subject: [PATCH 054/668] cosmetic fixes --- cmd/minikube/cmd/node_add.go | 2 +- cmd/minikube/cmd/node_start.go | 5 +---- cmd/minikube/cmd/start.go | 16 +++++++------- cmd/minikube/cmd/status.go | 11 ++++++++-- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 16 +++++++++----- pkg/minikube/node/node.go | 4 ++-- pkg/minikube/node/start.go | 23 ++++++++++++--------- 7 files changed, 45 insertions(+), 32 deletions(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 9247d872ed..a450684ec3 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -43,7 +43,7 @@ var nodeAddCmd = &cobra.Command{ exit.WithError("Error getting config", err) } - name := fmt.Sprintf("m%d", len(cc.Nodes)+1) + name := fmt.Sprintf("m%02d", len(cc.Nodes)+1) out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile}) diff --git a/cmd/minikube/cmd/node_start.go b/cmd/minikube/cmd/node_start.go index 92f866f99e..658ee092bf 100644 --- a/cmd/minikube/cmd/node_start.go +++ b/cmd/minikube/cmd/node_start.go @@ -61,10 +61,7 @@ var nodeStartCmd = &cobra.Command{ } // Start it up baby - err = node.Start(*cc, *n, nil) - if err != nil { - out.FatalT("Failed to start node {{.name}}", out.V{"name": name}) - } + node.Start(*cc, *n, nil) }, } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 5df3d73d88..9b87afbd94 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -315,7 +315,7 @@ func runStart(cmd *cobra.Command, args []string) { } k8sVersion := getKubernetesVersion(existing) - mc, n, err := generateCfgFromFlags(cmd, k8sVersion, 
driverName) + cc, n, err := generateCfgFromFlags(cmd, k8sVersion, driverName) if err != nil { exit.WithError("Failed to generate config", err) } @@ -331,7 +331,7 @@ func runStart(cmd *cobra.Command, args []string) { if err != nil { exit.WithError("Failed to cache ISO", err) } - mc.MinikubeISO = url + cc.MinikubeISO = url } if viper.GetBool(nativeSSH) { @@ -350,16 +350,16 @@ func runStart(cmd *cobra.Command, args []string) { // Abstraction leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot. // Hence, saveConfig must be called before startHost, and again afterwards when we know the IP. - if err := config.SaveProfile(viper.GetString(config.ProfileName), &mc); err != nil { + if err := config.SaveProfile(viper.GetString(config.ProfileName), &cc); err != nil { exit.WithError("Failed to save config", err) } - kubeconfig, err := cluster.InitialSetup(mc, n, existingAddons) + kubeconfig, err := cluster.InitialSetup(cc, n, existingAddons) if err != nil { exit.WithError("Starting node", err) } - if err := showKubectlInfo(kubeconfig, k8sVersion, mc.Name); err != nil { + if err := showKubectlInfo(kubeconfig, k8sVersion, cc.Name); err != nil { glog.Errorf("kubectl info: %v", err) } @@ -369,14 +369,14 @@ func runStart(cmd *cobra.Command, args []string) { out.T(out.Meh, "The none driver is not compatible with multi-node clusters.") } for i := 1; i < numNodes; i++ { - nodeName := fmt.Sprintf("%s%d", n.Name, i+1) + nodeName := fmt.Sprintf("m%02d", i+1) n := config.Node{ Name: nodeName, Worker: true, ControlPlane: false, - KubernetesVersion: mc.KubernetesConfig.KubernetesVersion, + KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, } - err := node.Add(&mc, n) + err := node.Add(&cc, n) if err != nil { exit.WithError("adding node", err) } diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index 012db5a980..6342706bd8 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -150,8 +150,15 @@ func 
exitCode(st *Status) int { func status(api libmachine.API, name string, controlPlane bool) (*Status, error) { - profile := strings.Split(name, "-")[0] - node := strings.Split(name, "-")[1] + var profile, node string + + if strings.Contains(name, "-") { + profile = strings.Split(name, "-")[0] + node = strings.Split(name, "-")[1] + } else { + profile = name + node = name + } st := &Status{ Name: node, diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 0f37e5ae11..2b28ed6f99 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -36,8 +36,8 @@ import ( const remoteContainerRuntime = "remote" // GenerateKubeadmYAML generates the kubeadm.yaml file -func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager, n config.Node) ([]byte, error) { - k8s := mc.KubernetesConfig +func GenerateKubeadmYAML(cc config.ClusterConfig, r cruntime.Manager, n config.Node) ([]byte, error) { + k8s := cc.KubernetesConfig version, err := ParseKubernetesVersion(k8s.KubernetesVersion) if err != nil { return nil, errors.Wrap(err, "parsing kubernetes version") @@ -50,7 +50,7 @@ func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager, n config.N } // In case of no port assigned, use default - cp, err := config.PrimaryControlPlane(&mc) + cp, err := config.PrimaryControlPlane(&cc) if err != nil { return nil, errors.Wrap(err, "getting control plane") } @@ -64,6 +64,11 @@ func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager, n config.N return nil, errors.Wrap(err, "generating extra component config for kubeadm") } + controlPlaneEndpoint := cp.IP + if n.ControlPlane { + controlPlaneEndpoint = "localhost" + } + opts := struct { CertDir string ServiceCIDR string @@ -91,7 +96,7 @@ func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager, n config.N KubernetesVersion: k8s.KubernetesVersion, EtcdDataDir: EtcdDataDir(), ClusterName: 
k8s.ClusterName, - NodeName: cp.Name, + NodeName: n.Name, CRISocket: r.SocketPath(), ImageRepository: k8s.ImageRepository, ComponentOptions: componentOpts, @@ -101,7 +106,7 @@ func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager, n config.N NodeIP: n.IP, // NOTE: If set to an specific VM IP, things may break if the IP changes on host restart // For multi-node, we may need to figure out an alternate strategy, like DNS or hosts files - ControlPlaneAddress: "localhost", + ControlPlaneAddress: controlPlaneEndpoint, } if k8s.ServiceCIDR != "" { @@ -126,6 +131,7 @@ func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager, n config.N if err := configTmpl.Execute(&b, opts); err != nil { return nil, err } + fmt.Printf("%s OPTS=%+v\n", n.Name, opts) glog.Infof("kubeadm config:\n%s\n", b.String()) return b.Bytes(), nil } diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 1b86069ee9..3dba42d1f0 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -43,8 +43,8 @@ func Add(cc *config.ClusterConfig, n config.Node) error { return err } - err = Start(*cc, n, nil) - return err + Start(*cc, n, nil) + return nil } // Delete stops and deletes the given node from the given cluster diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 0b2d4d3708..80fc09a52c 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -28,12 +28,12 @@ import ( ) // Start spins up a guest and starts the kubernetes node. -func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) error { +func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) { // Now that the ISO is downloaded, pull images in the background while the VM boots. 
var cacheGroup, kicGroup errgroup.Group cluster.BeginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) - runner, preExists, mAPI, _ := cluster.StartMachine(&cc, &n) + runner, _, mAPI, _ := cluster.StartMachine(&cc, &n) defer mAPI.Close() bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cc, n) @@ -69,13 +69,6 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo out.T(out.FailureType, "Unable to load cached images from config file.") } - // Skip pre-existing, because we already waited for health - if viper.GetBool(waitUntilHealthy) && !preExists { - if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil { - exit.WithError("Wait failed", err) - } - } - err = bs.SetupCerts(cc.KubernetesConfig, n) if err != nil { exit.WithError("setting up certs", err) @@ -93,5 +86,15 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo if err != nil { exit.WithError("generating join token", err) } - return bs.JoinCluster(cc, n, joinCmd) + err = bs.JoinCluster(cc, n, joinCmd) + if err != nil { + exit.WithError("joining cluster", err) + } + + /*// Skip pre-existing, because we already waited for health + if viper.GetBool(waitUntilHealthy) && !preExists { + if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil { + exit.WithError("Wait failed", err) + } + }*/ } From b6ab2931d69b789d1a07e0e8930dfd444a4248c4 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 13 Mar 2020 11:29:46 -0700 Subject: [PATCH 055/668] run all necessary steps on all nodes --- pkg/minikube/bootstrapper/bootstrapper.go | 3 +- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 4 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 41 +++++++++++++------- pkg/minikube/node/start.go | 24 +++++------- 4 files changed, 42 insertions(+), 30 deletions(-) diff --git a/pkg/minikube/bootstrapper/bootstrapper.go 
b/pkg/minikube/bootstrapper/bootstrapper.go index f250423833..1dac315e80 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -42,7 +42,8 @@ type Bootstrapper interface { WaitForNode(config.ClusterConfig, config.Node, time.Duration) error JoinCluster(config.ClusterConfig, config.Node, string) error UpdateNode(config.ClusterConfig, config.Node, cruntime.Manager) error - GenerateToken(config.KubernetesConfig) (string, error) + SetupNode(config.ClusterConfig) error + GenerateToken(config.ClusterConfig) (string, error) // LogCommands returns a map of log type to a command which will display that log. LogCommands(LogOptions) map[string]string SetupCerts(config.KubernetesConfig, config.Node) error diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 2b28ed6f99..dacce16d43 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -65,9 +65,9 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, r cruntime.Manager, n config.N } controlPlaneEndpoint := cp.IP - if n.ControlPlane { + /*if n.ControlPlane { controlPlaneEndpoint = "localhost" - } + }*/ opts := struct { CertDir string diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index ca207f63d9..a5143a7587 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -200,22 +200,17 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { } + err = k.SetupNode(cfg) + if err != nil { + return errors.Wrap(err, "setting up node") + } + c := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), bsutil.KubeadmYamlPath, extraFlags, strings.Join(ignore, ","))) rr, err := k.c.RunCmd(c) if err != nil { return errors.Wrapf(err, "init failed. 
output: %q", rr.Output()) } - if cfg.Driver == driver.Docker { - if err := k.applyKicOverlay(cfg); err != nil { - return errors.Wrap(err, "apply kic overlay") - } - } - - if err := k.applyNodeLabels(cfg); err != nil { - glog.Warningf("unable to apply node labels: %v", err) - } - if err := bsutil.AdjustResourceLimits(k.c); err != nil { glog.Warningf("unable to adjust resource limits: %v", err) } @@ -227,6 +222,20 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { return nil } +func (k *Bootstrapper) SetupNode(cfg config.ClusterConfig) error { + if cfg.Driver == driver.Docker { + if err := k.applyKicOverlay(cfg); err != nil { + return errors.Wrap(err, "apply kic overlay") + } + } + + if err := k.applyNodeLabels(cfg); err != nil { + glog.Warningf("unable to apply node labels: %v", err) + } + + return nil +} + // client sets and returns a Kubernetes client to use to speak to a kubeadm launched apiserver func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error) { if k.k8sClient != nil { @@ -384,14 +393,20 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC } // GenerateToken creates a token and returns the appropriate kubeadm join command to run -func (k *Bootstrapper) GenerateToken(k8s config.KubernetesConfig) (string, error) { - tokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token create --print-join-command --ttl=0", bsutil.InvokeKubeadm(k8s.KubernetesVersion))) +func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) { + tokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token create --print-join-command --ttl=0", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion))) r, err := k.c.RunCmd(tokenCmd) if err != nil { return "", errors.Wrap(err, "generating bootstrap token") } + + /*cp, err := config.PrimaryControlPlane(&cc) + if err != nil { + return "", errors.Wrap(err, "getting primary control plane") + }*/ joinCmd := r.Stdout.String() - 
joinCmd = strings.Replace(joinCmd, "kubeadm", bsutil.InvokeKubeadm(k8s.KubernetesVersion), 1) + joinCmd = strings.Replace(joinCmd, "kubeadm", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), 1) + //joinCmd = strings.ReplaceAll(joinCmd, "localhost", cp.IP) joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(joinCmd)) return joinCmd, nil diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 80fc09a52c..1550390771 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -24,7 +24,6 @@ import ( "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/out" ) // Start spins up a guest and starts the kubernetes node. @@ -66,14 +65,17 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo } if err := cluster.CacheAndLoadImagesInConfig(); err != nil { - out.T(out.FailureType, "Unable to load cached images from config file.") + exit.WithError("Unable to load cached images from config file.", err) } - err = bs.SetupCerts(cc.KubernetesConfig, n) - if err != nil { + if err = bs.SetupCerts(cc.KubernetesConfig, n); err != nil { exit.WithError("setting up certs", err) } + if err = bs.SetupNode(cc); err != nil { + exit.WithError("Failed to setup node", err) + } + cp, err := config.PrimaryControlPlane(&cc) if err != nil { exit.WithError("Getting primary control plane", err) @@ -82,19 +84,13 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo if err != nil { exit.WithError("Getting bootstrapper", err) } - joinCmd, err := cpBs.GenerateToken(cc.KubernetesConfig) + + joinCmd, err := cpBs.GenerateToken(cc) if err != nil { exit.WithError("generating join token", err) } - err = bs.JoinCluster(cc, n, joinCmd) - if err != nil { + + if err = bs.JoinCluster(cc, n, joinCmd); err != nil { exit.WithError("joining cluster", err) } - - /*// Skip pre-existing, because we already 
waited for health - if viper.GetBool(waitUntilHealthy) && !preExists { - if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil { - exit.WithError("Wait failed", err) - } - }*/ } From a9b73b8ba3f539832f26ec2051d62791c391a097 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 13 Mar 2020 15:03:19 -0700 Subject: [PATCH 056/668] fixing up minikube start path --- cmd/minikube/cmd/start.go | 8 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 44 ++--- pkg/minikube/cluster/cache.go | 8 +- pkg/minikube/cluster/setup.go | 19 +- pkg/minikube/machine/cluster_test.go | 12 +- pkg/minikube/machine/start.go | 20 +- pkg/minikube/node/machine.go | 187 ------------------- pkg/minikube/node/start.go | 14 +- 8 files changed, 71 insertions(+), 241 deletions(-) delete mode 100644 pkg/minikube/node/machine.go diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 9b87afbd94..af7e32b078 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -359,10 +359,6 @@ func runStart(cmd *cobra.Command, args []string) { exit.WithError("Starting node", err) } - if err := showKubectlInfo(kubeconfig, k8sVersion, cc.Name); err != nil { - glog.Errorf("kubectl info: %v", err) - } - numNodes := viper.GetInt(nodes) if numNodes > 1 { if driver.IsKIC(driverName) { @@ -382,6 +378,10 @@ func runStart(cmd *cobra.Command, args []string) { } } } + + if err := showKubectlInfo(kubeconfig, k8sVersion, cc.Name); err != nil { + glog.Errorf("kubectl info: %v", err) + } } func updateDriver(driverName string) { diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index a5143a7587..cb1d0d1058 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -222,6 +222,7 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { return nil } +// SetupNode runs commands that need to be on all nodes func (k *Bootstrapper) SetupNode(cfg 
config.ClusterConfig) error { if cfg.Driver == driver.Docker { if err := k.applyKicOverlay(cfg); err != nil { @@ -339,33 +340,32 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { return errors.Wrap(err, "apiserver healthz") } - for _, n := range cfg.Nodes { - ip := n.IP - port := n.Port - if driver.IsKIC(cfg.Driver) { - ip = oci.DefaultBindIPV4 - port, err = oci.HostPortBinding(cfg.Driver, driver.MachineName(cfg, n), port) - if err != nil { - return errors.Wrapf(err, "get host-bind port %d for container %s", port, driver.MachineName(cfg, n)) - } - } - client, err := k.client(ip, port) + cp, err := config.PrimaryControlPlane(&cfg) + ip := cp.IP + port := cp.Port + if driver.IsKIC(cfg.Driver) { + ip = oci.DefaultBindIPV4 + port, err = oci.HostPortBinding(cfg.Driver, driver.MachineName(cfg, cp), port) if err != nil { - return errors.Wrap(err, "getting k8s client") + return errors.Wrapf(err, "get host-bind port %d for container %s", port, driver.MachineName(cfg, cp)) } + } + client, err := k.client(ip, port) + if err != nil { + return errors.Wrap(err, "getting k8s client") + } - if err := kverify.SystemPods(client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { - return errors.Wrap(err, "system pods") - } + if err := kverify.SystemPods(client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { + return errors.Wrap(err, "system pods") + } - // Explicitly re-enable kubeadm addons (proxy, coredns) so that they will check for IP or configuration changes. - if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, bsutil.KubeadmYamlPath))); err != nil { - return errors.Wrapf(err, fmt.Sprintf("addon phase cmd:%q", rr.Command())) - } + // Explicitly re-enable kubeadm addons (proxy, coredns) so that they will check for IP or configuration changes. 
+ if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, bsutil.KubeadmYamlPath))); err != nil { + return errors.Wrapf(err, fmt.Sprintf("addon phase cmd:%q", rr.Command())) + } - if err := bsutil.AdjustResourceLimits(k.c); err != nil { - glog.Warningf("unable to adjust resource limits: %v", err) - } + if err := bsutil.AdjustResourceLimits(k.c); err != nil { + glog.Warningf("unable to adjust resource limits: %v", err) } return nil } diff --git a/pkg/minikube/cluster/cache.go b/pkg/minikube/cluster/cache.go index 809e87580b..e7563d1d78 100644 --- a/pkg/minikube/cluster/cache.go +++ b/pkg/minikube/cluster/cache.go @@ -40,8 +40,8 @@ const ( cacheImageConfigKey = "cache" ) -// BeginCacheRequiredImages caches images required for kubernetes version in the background -func BeginCacheRequiredImages(g *errgroup.Group, imageRepository string, k8sVersion string, cRuntime string) { +// BeginCacheKubernetesImages caches images required for kubernetes version in the background +func BeginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVersion string, cRuntime string) { if download.PreloadExists(k8sVersion, cRuntime) { g.Go(func() error { glog.Info("Caching tarball of preloaded images") @@ -96,8 +96,8 @@ func doCacheBinaries(k8sVersion string) error { return machine.CacheBinariesForBootstrapper(k8sVersion, viper.GetString(cmdcfg.Bootstrapper)) } -// beginDownloadKicArtifacts downloads the kic image + preload tarball, returns true if preload is available -func beginDownloadKicArtifacts(g *errgroup.Group) { +// BeginDownloadKicArtifacts downloads the kic image + preload tarball, returns true if preload is available +func BeginDownloadKicArtifacts(g *errgroup.Group) { glog.Info("Beginning downloading kic artifacts") g.Go(func() error { glog.Infof("Downloading %s to local daemon", kic.BaseImage) diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index ad2fc408ff..cd653bbdbd 100644 --- 
a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -71,7 +71,17 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str bs := setupKubeAdm(machineAPI, cc, n) var cacheGroup errgroup.Group - BeginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) + if !driver.BareMetal(cc.Driver) { + BeginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) + } + + var kicGroup errgroup.Group + if driver.IsKIC(cc.Driver) { + BeginDownloadKicArtifacts(&kicGroup) + } + + HandleDownloadOnly(&cacheGroup, &kicGroup, n.KubernetesVersion) + WaitDownloadKicArtifacts(&kicGroup) // pull images or restart cluster out.T(out.Launch, "Launching Kubernetes ... ") @@ -189,12 +199,7 @@ func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner command. // startHost starts a new minikube host using a VM or None func startHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, bool) { - exists, err := api.Exists(n.Name) - if err != nil { - exit.WithError("Failed to check if machine exists", err) - } - - host, err := machine.StartHost(api, mc, n) + host, exists, err := machine.StartHost(api, mc, n) if err != nil { exit.WithError("Unable to start VM. Please investigate and run 'minikube delete' if possible", err) } diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index 32dcc0ec0f..c4c26d27c2 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -130,7 +130,7 @@ func TestStartHostExists(t *testing.T) { n := config.Node{Name: ih.Name} // This should pass without calling Create because the host exists already. 
- h, err := StartHost(api, mc, n) + h, _, err := StartHost(api, mc, n) if err != nil { t.Fatalf("Error starting host: %v", err) } @@ -164,7 +164,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) { n := config.Node{Name: h.Name} // This should pass with creating host, while machine does not exist. - h, err = StartHost(api, mc, n) + h, _, err = StartHost(api, mc, n) if err != nil { if err != ErrorMachineNotExist { t.Fatalf("Error starting host: %v", err) @@ -177,7 +177,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) { n.Name = h.Name // Second call. This should pass without calling Create because the host exists already. - h, err = StartHost(api, mc, n) + h, _, err = StartHost(api, mc, n) if err != nil { t.Fatalf("Error starting host: %v", err) } @@ -210,7 +210,7 @@ func TestStartStoppedHost(t *testing.T) { mc := defaultClusterConfig mc.Name = h.Name n := config.Node{Name: h.Name} - h, err = StartHost(api, mc, n) + h, _, err = StartHost(api, mc, n) if err != nil { t.Fatal("Error starting host.") } @@ -238,7 +238,7 @@ func TestStartHost(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - h, err := StartHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) + h, _, err := StartHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) if err != nil { t.Fatal("Error starting host.") } @@ -272,7 +272,7 @@ func TestStartHostConfig(t *testing.T) { DockerOpt: []string{"param=value"}, } - h, err := StartHost(api, cfg, config.Node{Name: "minikube"}) + h, _, err := StartHost(api, cfg, config.Node{Name: "minikube"}) if err != nil { t.Fatal("Error starting host.") } diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go index 8f44a12b66..368af5b3af 100644 --- a/pkg/minikube/machine/start.go +++ b/pkg/minikube/machine/start.go @@ -62,28 +62,32 @@ var ( ) // StartHost starts a host VM. 
-func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, error) { +func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, bool, error) { + machineName := driver.MachineName(cfg, n) + // Prevent machine-driver boot races, as well as our own certificate race - releaser, err := acquireMachinesLock(n.Name) + releaser, err := acquireMachinesLock(machineName) if err != nil { - return nil, errors.Wrap(err, "boot lock") + return nil, false, errors.Wrap(err, "boot lock") } start := time.Now() defer func() { - glog.Infof("releasing machines lock for %q, held for %s", n.Name, time.Since(start)) + glog.Infof("releasing machines lock for %q, held for %s", machineName, time.Since(start)) releaser.Release() }() - exists, err := api.Exists(n.Name) + exists, err := api.Exists(machineName) if err != nil { - return nil, errors.Wrapf(err, "exists: %s", n.Name) + return nil, false, errors.Wrapf(err, "exists: %s", machineName) } if !exists { glog.Infof("Provisioning new machine with config: %+v %+v", cfg, n) - return createHost(api, cfg, n) + h, err := createHost(api, cfg, n) + return h, exists, err } glog.Infoln("Skipping create...Using existing machine configuration") - return fixHost(api, cfg, n) + h, err := fixHost(api, cfg, n) + return h, exists, err } func engineOptions(cfg config.ClusterConfig) *engine.Options { diff --git a/pkg/minikube/node/machine.go b/pkg/minikube/node/machine.go deleted file mode 100644 index 483131515a..0000000000 --- a/pkg/minikube/node/machine.go +++ /dev/null @@ -1,187 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package node - -import ( - "fmt" - "net" - "os" - "os/exec" - "strings" - "time" - - "github.com/docker/machine/libmachine" - "github.com/docker/machine/libmachine/host" - "github.com/golang/glog" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/bootstrapper/images" - "k8s.io/minikube/pkg/minikube/command" - "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" - "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/machine" - "k8s.io/minikube/pkg/minikube/out" - "k8s.io/minikube/pkg/minikube/proxy" - "k8s.io/minikube/pkg/util/retry" -) - -func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { - m, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Failed to get machine client", err) - } - host, preExists = startHost(m, *cfg, *node) - runner, err = machine.CommandRunner(host) - if err != nil { - exit.WithError("Failed to get command runner", err) - } - - ip := validateNetwork(host, runner) - - // Bypass proxy for minikube's vm host ip - err = proxy.ExcludeIP(ip) - if err != nil { - out.ErrT(out.FailureType, "Failed to set NO_PROXY Env. 
Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip}) - } - // Save IP to configuration file for subsequent use - node.IP = ip - - if err := Save(cfg, node); err != nil { - exit.WithError("Failed to save config", err) - } - - return runner, preExists, m, host -} - -// startHost starts a new minikube host using a VM or None -func startHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, bool) { - exists, err := api.Exists(mc.Name) - if err != nil { - exit.WithError("Failed to check if machine exists", err) - } - - host, err := machine.StartHost(api, mc, n) - if err != nil { - exit.WithError("Unable to start VM. Please investigate and run 'minikube delete' if possible", err) - } - return host, exists -} - -// validateNetwork tries to catch network problems as soon as possible -func validateNetwork(h *host.Host, r command.Runner) string { - ip, err := h.Driver.GetIP() - if err != nil { - exit.WithError("Unable to get VM IP address", err) - } - - optSeen := false - warnedOnce := false - for _, k := range proxy.EnvVars { - if v := os.Getenv(k); v != "" { - if !optSeen { - out.T(out.Internet, "Found network options:") - optSeen = true - } - out.T(out.Option, "{{.key}}={{.value}}", out.V{"key": k, "value": v}) - ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY - k = strings.ToUpper(k) // for http_proxy & https_proxy - if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce { - out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). 
Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"}) - warnedOnce = true - } - } - } - - if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) { - trySSH(h, ip) - } - - tryLookup(r) - tryRegistry(r) - return ip -} - -func trySSH(h *host.Host, ip string) { - if viper.GetBool("force") { - return - } - - sshAddr := net.JoinHostPort(ip, "22") - - dial := func() (err error) { - d := net.Dialer{Timeout: 3 * time.Second} - conn, err := d.Dial("tcp", sshAddr) - if err != nil { - out.WarningT("Unable to verify SSH connectivity: {{.error}}. Will retry...", out.V{"error": err}) - return err - } - _ = conn.Close() - return nil - } - - if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil { - exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}} - - This is likely due to one of two reasons: - - - VPN or firewall interference - - {{.hypervisor}} network configuration issue - - Suggested workarounds: - - - Disable your local VPN or firewall software - - Configure your local VPN or firewall to allow access to {{.ip}} - - Restart or reinstall {{.hypervisor}} - - Use an alternative --driver - - Use --force to override this connectivity check - `, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip}) - } -} - -func tryLookup(r command.Runner) { - // DNS check - if rr, err := r.RunCmd(exec.Command("nslookup", "-type=ns", "kubernetes.io")); err != nil { - glog.Infof("%s failed: %v which might be okay will retry nslookup without query type", rr.Args, err) - // will try with without query type for ISOs with different busybox versions. 
- if _, err = r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil { - glog.Warningf("nslookup failed: %v", err) - // try with the older "host" command, instead of the newer "nslookup" - if _, err = r.RunCmd(exec.Command("host", "kubernetes.io")); err != nil { - out.WarningT("Node may be unable to resolve external DNS records") - } - } - } -} -func tryRegistry(r command.Runner) { - // Try an HTTPS connection to the image repository - proxy := os.Getenv("HTTPS_PROXY") - opts := []string{"-sS"} - if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") { - opts = append([]string{"-x", proxy}, opts...) - } - - repo := viper.GetString(imageRepository) - if repo == "" { - repo = images.DefaultKubernetesRepo - } - - opts = append(opts, fmt.Sprintf("https://%s/", repo)) - if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil { - glog.Warningf("%s failed: %v", rr.Args, err) - out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo}) - } -} diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 1550390771..28c6509c49 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -23,14 +23,22 @@ import ( "k8s.io/minikube/pkg/addons" "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" ) // Start spins up a guest and starts the kubernetes node. func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) { // Now that the ISO is downloaded, pull images in the background while the VM boots. 
- var cacheGroup, kicGroup errgroup.Group - cluster.BeginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) + var cacheGroup errgroup.Group + if !driver.BareMetal(cc.Driver) { + cluster.BeginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) + } + + var kicGroup errgroup.Group + if driver.IsKIC(cc.Driver) { + cluster.BeginDownloadKicArtifacts(&kicGroup) + } runner, _, mAPI, _ := cluster.StartMachine(&cc, &n) defer mAPI.Close() @@ -40,7 +48,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo exit.WithError("Failed to get bootstrapper", err) } - k8sVersion := cc.KubernetesConfig.KubernetesVersion + k8sVersion := n.KubernetesVersion driverName := cc.Driver // exits here in case of --download-only option. cluster.HandleDownloadOnly(&cacheGroup, &kicGroup, k8sVersion) From d98ebcfb687b530b31b7a7c6d15bd461c48da1c9 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 13 Mar 2020 15:32:14 -0700 Subject: [PATCH 057/668] lint --- cmd/minikube/cmd/stop.go | 34 ++++++++++++++------ pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 3 ++ pkg/minikube/node/node.go | 3 -- 3 files changed, 28 insertions(+), 12 deletions(-) diff --git a/cmd/minikube/cmd/stop.go b/cmd/minikube/cmd/stop.go index c3586b8492..005284a022 100644 --- a/cmd/minikube/cmd/stop.go +++ b/cmd/minikube/cmd/stop.go @@ -17,7 +17,12 @@ limitations under the License. 
package cmd import ( + "time" + "github.com/docker/machine/libmachine" + "github.com/docker/machine/libmachine/mcnerror" + "github.com/golang/glog" + "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/config" @@ -27,6 +32,7 @@ import ( "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/util/retry" ) // stopCmd represents the stop command @@ -72,16 +78,26 @@ func runStop(cmd *cobra.Command, args []string) { func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool { nonexistent := false - - // TODO replace this back with expo backoff - for _, n := range cluster.Nodes { - err := machine.StopHost(api, driver.MachineName(cluster, n)) - if err != nil { - exit.WithError("Unable to stop VM", err) + stop := func() (err error) { + machineName := driver.MachineName(cluster, n) + err = machine.StopHost(api, machineName) + if err == nil { + return nil } - /*if err := retry.Expo(fn, 5*time.Second, 3*time.Minute, 5); err != nil { - exit.WithError("Unable to stop VM", err) - }*/ + glog.Warningf("stop host returned error: %v", err) + + switch err := errors.Cause(err).(type) { + case mcnerror.ErrHostDoesNotExist: + out.T(out.Meh, `"{{.machineName}}" does not exist, nothing to stop`, out.V{"machineName": machineName}) + nonexistent = true + return nil + default: + return err + } + } + + if err := retry.Expo(stop, 5*time.Second, 3*time.Minute, 5); err != nil { + exit.WithError("Unable to stop VM", err) } return nonexistent diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 13d8f6e921..ed458c16b2 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -346,6 +346,9 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { } cp, err := config.PrimaryControlPlane(&cfg) + if err != nil { + return 
errors.Wrap(err, "getting control plane") + } ip := cp.IP port := cp.Port if driver.IsKIC(cfg.Driver) { diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 3dba42d1f0..41d518f3f2 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -27,12 +27,9 @@ import ( // TODO: Share these between cluster and node packages const ( - waitUntilHealthy = "wait" containerRuntime = "container-runtime" mountString = "mount-string" createMount = "mount" - waitTimeout = "wait-timeout" - imageRepository = "image-repository" ) // Add adds a new node config to an existing cluster. From 9c8102dd58f7403e1a18737d2b38bdc1ef715ea0 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Fri, 13 Mar 2020 16:05:46 -0700 Subject: [PATCH 058/668] Refactor preload_images.go to generate and upload a preloaded tarball This script is now responsible for making sure that the DefaultK8sVersion, NewestK8sVersion, and OldestK8sVersion supported by minikube have preloaded tarballs. It will be run on every PR as an automated release script for preloaded tarballs. --- Makefile | 10 +- hack/preload-images/generate.go | 125 +++++++++++++++++++++ hack/preload-images/preload_images.go | 151 ++++++-------------------- hack/preload-images/upload.go | 43 ++++++++ 4 files changed, 203 insertions(+), 126 deletions(-) create mode 100644 hack/preload-images/generate.go create mode 100644 hack/preload-images/upload.go diff --git a/Makefile b/Makefile index 9a4685affd..ac1787f480 100755 --- a/Makefile +++ b/Makefile @@ -526,14 +526,8 @@ kic-base-image: ## builds the base image used for kic. docker build -f ./hack/images/kicbase.Dockerfile -t $(REGISTRY)/kicbase:$(KIC_VERSION)-snapshot --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) --target base . .PHONY: upload-preloaded-images-tar -upload-preloaded-images-tar: generate-preloaded-images-tar # Upload the preloaded images tar to the GCS bucket. 
Specify a specific kubernetes version to build via `KUBERNETES_VERSION=vx.y.z make upload-preloaded-images-tar`. - gsutil cp out/preloaded-images-k8s-${PRELOADED_TARBALL_VERSION}-${KUBERNETES_VERSION}-docker-overlay2.tar.lz4 gs://${PRELOADED_VOLUMES_GCS_BUCKET} - gsutil acl ch -u AllUsers:R gs://${PRELOADED_VOLUMES_GCS_BUCKET}/preloaded-images-k8s-${PRELOADED_TARBALL_VERSION}-${KUBERNETES_VERSION}-docker-overlay2.tar.lz4 - -.PHONY: generate-preloaded-images-tar -generate-preloaded-images-tar: - go run ./hack/preload-images/preload_images.go -kubernetes-version ${KUBERNETES_VERSION} -preloaded-tarball-version ${PRELOADED_TARBALL_VERSION} - +upload-preloaded-images-tar: out/minikube # Upload the preloaded images for oldest supported, newest supported, and default kubernetes versions to GCS. + go run ./hack/preload-images/*.go .PHONY: push-storage-provisioner-image push-storage-provisioner-image: storage-provisioner-image ## Push storage-provisioner docker image using gcloud diff --git a/hack/preload-images/generate.go b/hack/preload-images/generate.go new file mode 100644 index 0000000000..a3160bc5a1 --- /dev/null +++ b/hack/preload-images/generate.go @@ -0,0 +1,125 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + + "github.com/pkg/errors" + "k8s.io/minikube/pkg/drivers/kic" + "k8s.io/minikube/pkg/drivers/kic/oci" + "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil" + "k8s.io/minikube/pkg/minikube/bootstrapper/images" + "k8s.io/minikube/pkg/minikube/command" + "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/driver" + "k8s.io/minikube/pkg/minikube/localpath" +) + +func generateTarball(kubernetesVersion, tarballFilename string) error { + defer func() { + if err := deleteMinikube(); err != nil { + fmt.Println(err) + } + }() + + driver := kic.NewDriver(kic.Config{ + KubernetesVersion: kubernetesVersion, + ContainerRuntime: driver.Docker, + OCIBinary: oci.Docker, + MachineName: profile, + ImageDigest: kic.BaseImage, + StorePath: localpath.MiniPath(), + CPU: 2, + Memory: 4000, + APIServerPort: 8080, + }) + + baseDir := filepath.Dir(driver.GetSSHKeyPath()) + defer os.Remove(baseDir) + + if err := os.MkdirAll(baseDir, 0755); err != nil { + return errors.Wrap(err, "mkdir") + } + if err := driver.Create(); err != nil { + return errors.Wrap(err, "creating kic driver") + } + + // Now, get images to pull + imgs, err := images.Kubeadm("", kubernetesVersion) + if err != nil { + return errors.Wrap(err, "kubeadm images") + } + + for _, img := range append(imgs, kic.OverlayImage) { + cmd := exec.Command("docker", "exec", profile, "docker", "pull", img) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return errors.Wrapf(err, "downloading %s", img) + } + } + + // Transfer in k8s binaries + kcfg := config.KubernetesConfig{ + KubernetesVersion: kubernetesVersion, + } + runner := command.NewKICRunner(profile, driver.OCIBinary) + if err := bsutil.TransferBinaries(kcfg, runner); err != nil { + return errors.Wrap(err, "transferring k8s binaries") + } + // Create image tarball + if err := createImageTarball(tarballFilename); err != nil { + return errors.Wrap(err, 
"create tarball") + } + return copyTarballToHost(tarballFilename) +} + +func createImageTarball(tarballFilename string) error { + dirs := []string{ + fmt.Sprintf("./lib/docker/%s", dockerStorageDriver), + "./lib/docker/image", + "./lib/minikube/binaries", + } + args := []string{"exec", profile, "sudo", "tar", "-I", "lz4", "-C", "/var", "-cvf", tarballFilename} + args = append(args, dirs...) + cmd := exec.Command("docker", args...) + cmd.Stdout = os.Stdout + if err := cmd.Run(); err != nil { + return errors.Wrapf(err, "tarball cmd: %s", cmd.Args) + } + return nil +} + +func copyTarballToHost(tarballFilename string) error { + dest := filepath.Join("out/", tarballFilename) + cmd := exec.Command("docker", "cp", fmt.Sprintf("%s:/%s", profile, tarballFilename), dest) + cmd.Stdout = os.Stdout + if err := cmd.Run(); err != nil { + return errors.Wrapf(err, "cp cmd: %s", cmd.Args) + } + return nil +} + +func deleteMinikube() error { + cmd := exec.Command(minikubePath, "delete", "-p", profile) + cmd.Stdout = os.Stdout + return cmd.Run() +} diff --git a/hack/preload-images/preload_images.go b/hack/preload-images/preload_images.go index 1c6c858331..17a66806a9 100644 --- a/hack/preload-images/preload_images.go +++ b/hack/preload-images/preload_images.go @@ -18,22 +18,13 @@ package main import ( "bytes" - "flag" "fmt" - "os" "os/exec" - "path/filepath" "strings" - "github.com/pkg/errors" - "k8s.io/minikube/pkg/drivers/kic" - "k8s.io/minikube/pkg/drivers/kic/oci" - "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil" - "k8s.io/minikube/pkg/minikube/bootstrapper/images" - "k8s.io/minikube/pkg/minikube/command" - "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" - "k8s.io/minikube/pkg/minikube/localpath" + "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/download" + "k8s.io/minikube/pkg/minikube/exit" ) const ( @@ -42,123 +33,47 @@ const ( ) var ( - kubernetesVersion = "" - tarballFilename = "" - dockerStorageDriver = "" - 
preloadedTarballVersion = "" - containerRuntime = "" + dockerStorageDriver = "overlay2" + preloadedTarballVersion = "v1" + containerRuntimes = []string{"docker"} + kubernetesVersions = []string{ + constants.OldestKubernetesVersion, + constants.DefaultKubernetesVersion, + constants.NewestKubernetesVersion, + } ) -func init() { - flag.StringVar(&kubernetesVersion, "kubernetes-version", "", "desired kubernetes version, for example `v1.17.2`") - flag.StringVar(&dockerStorageDriver, "docker-storage-driver", "overlay2", "docker storage driver backend") - flag.StringVar(&preloadedTarballVersion, "preloaded-tarball-version", "", "preloaded tarball version") - flag.StringVar(&containerRuntime, "container-runtime", "docker", "container runtime") - - flag.Parse() - tarballFilename = fmt.Sprintf("preloaded-images-k8s-%s-%s-%s-%s.tar.lz4", preloadedTarballVersion, kubernetesVersion, containerRuntime, dockerStorageDriver) -} - func main() { if err := verifyDockerStorage(); err != nil { - fmt.Println(err) - os.Exit(1) + exit.WithError("Docker storage type is incompatible: %v\n", err) } - if err := executePreloadImages(); err != nil { - fmt.Println(err) - os.Exit(1) - } -} - -func executePreloadImages() error { - defer func() { - if err := deleteMinikube(); err != nil { - fmt.Println(err) - } - }() - - driver := kic.NewDriver(kic.Config{ - KubernetesVersion: kubernetesVersion, - ContainerRuntime: driver.Docker, - OCIBinary: oci.Docker, - MachineName: profile, - ImageDigest: kic.BaseImage, - StorePath: localpath.MiniPath(), - CPU: 2, - Memory: 4000, - APIServerPort: 8080, - }) - - baseDir := filepath.Dir(driver.GetSSHKeyPath()) - defer os.Remove(baseDir) - - if err := os.MkdirAll(baseDir, 0755); err != nil { - return errors.Wrap(err, "mkdir") - } - if err := driver.Create(); err != nil { - return errors.Wrap(err, "creating kic driver") - } - - // Now, get images to pull - imgs, err := images.Kubeadm("", kubernetesVersion) - if err != nil { - return errors.Wrap(err, "kubeadm 
images") - } - - for _, img := range append(imgs, kic.OverlayImage) { - cmd := exec.Command("docker", "exec", profile, "docker", "pull", img) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return errors.Wrapf(err, "downloading %s", img) + for _, kubernetesVersion := range kubernetesVersions { + for _, cr := range containerRuntimes { + tf := tarballFilename(kubernetesVersion, cr) + if tarballExists(tf) { + fmt.Printf("A preloaded tarball for k8s version %s already exists, skipping generation.\n", kubernetesVersion) + continue + } + fmt.Printf("A preloaded tarball for k8s version %s doesn't exist, generating now...\n", kubernetesVersion) + if err := generateTarball(kubernetesVersion, tf); err != nil { + exit.WithError(fmt.Sprintf("generating tarball for k8s version %s with %s", kubernetesVersion, cr), err) + } + if err := uploadTarball(tf); err != nil { + exit.WithError(fmt.Sprintf("uploading tarball for k8s version %s with %s", kubernetesVersion, cr), err) + } } } - - // Transfer in k8s binaries - kcfg := config.KubernetesConfig{ - KubernetesVersion: kubernetesVersion, - } - runner := command.NewKICRunner(profile, driver.OCIBinary) - if err := bsutil.TransferBinaries(kcfg, runner); err != nil { - return errors.Wrap(err, "transferring k8s binaries") - } - // Create image tarball - if err := createImageTarball(); err != nil { - return errors.Wrap(err, "create tarball") - } - return copyTarballToHost() } -func createImageTarball() error { - dirs := []string{ - fmt.Sprintf("./lib/docker/%s", dockerStorageDriver), - "./lib/docker/image", - "./lib/minikube/binaries", - } - args := []string{"exec", profile, "sudo", "tar", "-I", "lz4", "-C", "/var", "-cvf", tarballFilename} - args = append(args, dirs...) - cmd := exec.Command("docker", args...) 
- cmd.Stdout = os.Stdout - if err := cmd.Run(); err != nil { - return errors.Wrapf(err, "tarball cmd: %s", cmd.Args) - } - return nil +func tarballFilename(kubernetesVersion string, containerRuntime string) string { + return fmt.Sprintf("preloaded-images-k8s-%s-%s-%s-%s.tar.lz4", preloadedTarballVersion, kubernetesVersion, containerRuntime, dockerStorageDriver) } -func copyTarballToHost() error { - dest := filepath.Join("out/", tarballFilename) - cmd := exec.Command("docker", "cp", fmt.Sprintf("%s:/%s", profile, tarballFilename), dest) - cmd.Stdout = os.Stdout - if err := cmd.Run(); err != nil { - return errors.Wrapf(err, "cp cmd: %s", cmd.Args) - } - return nil -} - -func deleteMinikube() error { - cmd := exec.Command(minikubePath, "delete", "-p", profile) - cmd.Stdout = os.Stdout - return cmd.Run() +func tarballExists(tarballFilename string) bool { + fmt.Println("Checking if tarball already exists...") + gcsPath := fmt.Sprintf("gs://%s/%s", download.PreloadBucket, tarballFilename) + cmd := exec.Command("gsutil", "stat", gcsPath) + return cmd.Run() == nil } func verifyDockerStorage() error { diff --git a/hack/preload-images/upload.go b/hack/preload-images/upload.go new file mode 100644 index 0000000000..a2181294e6 --- /dev/null +++ b/hack/preload-images/upload.go @@ -0,0 +1,43 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "fmt" + "os/exec" + "path" + + "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube/download" +) + +func uploadTarball(tarballFilename string) error { + // Upload tarball to GCS + hostPath := path.Join("out/", tarballFilename) + gcsDest := fmt.Sprintf("gs://%s", download.PreloadBucket) + cmd := exec.Command("gsutil", "cp", hostPath, gcsDest) + if output, err := cmd.Output(); err != nil { + return errors.Wrapf(err, "uploading %s to GCS bucket: %v\n%s", hostPath, err, string(output)) + } + // Make tarball public to all users + gcsPath := fmt.Sprintf("%s/%s", gcsDest, tarballFilename) + cmd = exec.Command("gsutil", "acl", "ch", "-u", "AllUsers:R", gcsPath) + if output, err := cmd.Output(); err != nil { + return errors.Wrapf(err, "uploading %s to GCS bucket: %v\n%s", hostPath, err, string(output)) + } + return nil +} From a83478f2681f6a88f0ebd0ad24ff0896a7e330a6 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Fri, 13 Mar 2020 16:10:44 -0700 Subject: [PATCH 059/668] Add running preload release script to github actions --- .github/workflows/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index a3572ed151..0d01eef0bd 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -19,6 +19,7 @@ jobs: run : | make minikube-linux-amd64 make e2e-linux-amd64 + make upload-preloaded-images-tar cp -r test/integration/testdata ./out whoami echo github ref $GITHUB_REF From 8b364befc0046e94325de5b19f731993292ddc99 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Fri, 13 Mar 2020 16:10:55 -0700 Subject: [PATCH 060/668] Remove unused variables from Makefile --- Makefile | 2 -- 1 file changed, 2 deletions(-) diff --git a/Makefile b/Makefile index ac1787f480..8d46355ada 100755 --- a/Makefile +++ b/Makefile @@ -21,8 +21,6 @@ VERSION ?= v$(RAW_VERSION) KUBERNETES_VERSION ?= $(shell egrep "DefaultKubernetesVersion =" pkg/minikube/constants/constants.go | cut -d \" -f2) 
KIC_VERSION ?= $(shell egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f2) -PRELOADED_TARBALL_VERSION ?= $(shell egrep "PreloadVersion =" pkg/minikube/download/preload.go | cut -d \" -f2) -PRELOADED_VOLUMES_GCS_BUCKET ?= $(shell egrep "PreloadBucket =" pkg/minikube/download/preload.go | cut -d \" -f2) # Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).0 From 89c6c72adb06d08946e9da9cf0ca41c39b5b2b96 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Fri, 13 Mar 2020 16:24:28 -0700 Subject: [PATCH 061/668] Skip preload if not running on amd64 Also make sure that preload is available before attempting to run preload. --- pkg/drivers/kic/kic.go | 4 ++++ pkg/minikube/download/preload.go | 5 +++++ pkg/minikube/node/config.go | 3 ++- 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/pkg/drivers/kic/kic.go b/pkg/drivers/kic/kic.go index dc9d2a5301..820748b5ee 100644 --- a/pkg/drivers/kic/kic.go +++ b/pkg/drivers/kic/kic.go @@ -119,6 +119,10 @@ func (d *Driver) Create() error { return errors.Wrap(err, "prepare kic ssh") } + // return now if no preload is available + if !download.PreloadExists(d.NodeConfig.KubernetesVersion, d.NodeConfig.ContainerRuntime) { + return nil + } t := time.Now() glog.Infof("Starting extracting preloaded images to volume") // Extract preloaded images to container diff --git a/pkg/minikube/download/preload.go b/pkg/minikube/download/preload.go index 62893edf1b..bd95377050 100644 --- a/pkg/minikube/download/preload.go +++ b/pkg/minikube/download/preload.go @@ -24,6 +24,7 @@ import ( "net/http" "os" "path" + "runtime" "cloud.google.com/go/storage" "google.golang.org/api/option" @@ -78,6 +79,10 @@ func PreloadExists(k8sVersion, containerRuntime string) bool { return false } + if runtime.GOARCH != "amd64" { + return false + } + // Omit remote check if tarball exists locally targetPath := TarballPath(k8sVersion) if _, err := 
os.Stat(targetPath); err == nil { diff --git a/pkg/minikube/node/config.go b/pkg/minikube/node/config.go index 46e6f32b06..a327a0532f 100644 --- a/pkg/minikube/node/config.go +++ b/pkg/minikube/node/config.go @@ -37,6 +37,7 @@ import ( "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" + "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/kubeconfig" @@ -75,7 +76,7 @@ func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config } // Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere. - if driver.IsVM(drvName) { + if driver.IsVM(drvName) && download.PreloadExists(k8s.KubernetesVersion, k8s.ContainerRuntime) { if err := cr.Preload(k8s); err != nil { switch err.(type) { case *cruntime.ErrISOFeature: From c3b56b646665455ff93a1e2309296850466ecd7e Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 13 Mar 2020 18:08:10 -0700 Subject: [PATCH 062/668] let's rearrange a bunch of code --- cmd/minikube/cmd/start.go | 10 +- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 7 +- pkg/minikube/bootstrapper/bsutil/kubelet.go | 3 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 3 +- pkg/minikube/cluster/cache.go | 2 +- pkg/minikube/cluster/setup.go | 96 ++++++++++++++++---- pkg/minikube/driver/driver.go | 2 +- pkg/minikube/node/config.go | 45 --------- pkg/minikube/node/start.go | 2 +- 9 files changed, 87 insertions(+), 83 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 6dc808860c..8ad78ffc6e 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -326,7 +326,7 @@ func runStart(cmd *cobra.Command, args []string) { return } - if !driver.BareMetal(driverName) && !driver.IsKIC(driverName) { + if driver.IsVM(driverName) { url, err := download.ISO(viper.GetStringSlice(isoURL), cmd.Flags().Changed(isoURL)) if 
err != nil { exit.WithError("Failed to cache ISO", err) @@ -348,12 +348,6 @@ func runStart(cmd *cobra.Command, args []string) { } } - // Abstraction leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot. - // Hence, saveConfig must be called before startHost, and again afterwards when we know the IP. - if err := config.SaveProfile(viper.GetString(config.ProfileName), &cc); err != nil { - exit.WithError("Failed to save config", err) - } - kubeconfig, err := cluster.InitialSetup(cc, n, existingAddons) if err != nil { exit.WithError("Starting node", err) @@ -361,7 +355,7 @@ func runStart(cmd *cobra.Command, args []string) { numNodes := viper.GetInt(nodes) if numNodes > 1 { - if driver.IsKIC(driverName) { + if driver.BareMetal(driverName) { out.T(out.Meh, "The none driver is not compatible with multi-node clusters.") } for i := 1; i < numNodes; i++ { diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 2f21d3563e..fa2e120acb 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -65,11 +65,6 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, r cruntime.Manager, n config.N return nil, errors.Wrap(err, "generating extra component config for kubeadm") } - controlPlaneEndpoint := cp.IP - /*if n.ControlPlane { - controlPlaneEndpoint = "localhost" - }*/ - opts := struct { CertDir string ServiceCIDR string @@ -107,7 +102,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, r cruntime.Manager, n config.N NodeIP: n.IP, // NOTE: If set to an specific VM IP, things may break if the IP changes on host restart // For multi-node, we may need to figure out an alternate strategy, like DNS or hosts files - ControlPlaneAddress: controlPlaneEndpoint, + ControlPlaneAddress: cp.IP, } if k8s.ServiceCIDR != "" { diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet.go b/pkg/minikube/bootstrapper/bsutil/kubelet.go index 
8ec9d01fc6..ce161b41da 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet.go @@ -26,6 +26,7 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/cruntime" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/util" ) @@ -60,7 +61,7 @@ func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manage extraOpts["node-ip"] = cp.IP } if nc.Name != "" { - extraOpts["hostname-override"] = nc.Name + extraOpts["hostname-override"] = driver.MachineName(mc, nc) } pauseImage := images.Pause(version, k8s.ImageRepository) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index ed458c16b2..09d23e2703 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -202,8 +202,7 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { } - err = k.SetupNode(cfg) - if err != nil { + if err = k.SetupNode(cfg); err != nil { return errors.Wrap(err, "setting up node") } diff --git a/pkg/minikube/cluster/cache.go b/pkg/minikube/cluster/cache.go index 56a58a9246..6fcf303f27 100644 --- a/pkg/minikube/cluster/cache.go +++ b/pkg/minikube/cluster/cache.go @@ -52,7 +52,7 @@ func BeginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVe glog.Warningf("Error downloading preloaded artifacts will continue without preload: %v", err) } - if !viper.GetBool("cache-images") { + if !viper.GetBool(cacheImages) { return } diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index cd653bbdbd..e67a4e6ca3 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -25,6 +25,7 @@ import ( "strings" "time" + "github.com/blang/semver" "github.com/docker/machine/libmachine" "github.com/docker/machine/libmachine/host" "github.com/golang/glog" @@ -37,10 +38,12 @@ import ( 
"k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/localpath" + "k8s.io/minikube/pkg/minikube/logs" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/proxy" @@ -54,13 +57,44 @@ const ( embedCerts = "embed-certs" keepContext = "keep-context" imageRepository = "image-repository" + containerRuntime = "container-runtime" ) // InitialSetup performs all necessary operations on the initial control plane node when first spinning up a cluster func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) (*kubeconfig.Settings, error) { - _, preExists, machineAPI, host := StartMachine(&cc, &n) + var kicGroup errgroup.Group + if driver.IsKIC(cc.Driver) { + BeginDownloadKicArtifacts(&kicGroup) + } + + var cacheGroup errgroup.Group + if !driver.BareMetal(cc.Driver) { + BeginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) + } + + // Abstraction leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot. + // Hence, saveConfig must be called before startHost, and again afterwards when we know the IP. 
+ if err := config.SaveProfile(viper.GetString(config.ProfileName), &cc); err != nil { + exit.WithError("Failed to save config", err) + } + + HandleDownloadOnly(&cacheGroup, &kicGroup, n.KubernetesVersion) + WaitDownloadKicArtifacts(&kicGroup) + + mRunner, preExists, machineAPI, host := StartMachine(&cc, &n) defer machineAPI.Close() + // wait for preloaded tarball to finish downloading before configuring runtimes + WaitCacheRequiredImages(&cacheGroup) + + sv, err := util.ParseKubernetesVersion(n.KubernetesVersion) + if err != nil { + return nil, err + } + + // configure the runtime (docker, containerd, crio) + cr := ConfigureRuntimes(mRunner, cc.Driver, cc.KubernetesConfig, sv) + // Must be written before bootstrap, otherwise health checks may flake due to stale IP kubeconfig, err := setupKubeconfig(host, &cc, &n, cc.Name) if err != nil { @@ -70,28 +104,13 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str // setup kubeadm (must come after setupKubeconfig) bs := setupKubeAdm(machineAPI, cc, n) - var cacheGroup errgroup.Group - if !driver.BareMetal(cc.Driver) { - BeginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) - } - - var kicGroup errgroup.Group - if driver.IsKIC(cc.Driver) { - BeginDownloadKicArtifacts(&kicGroup) - } - - HandleDownloadOnly(&cacheGroup, &kicGroup, n.KubernetesVersion) - WaitDownloadKicArtifacts(&kicGroup) - // pull images or restart cluster out.T(out.Launch, "Launching Kubernetes ... 
") err = bs.StartCluster(cc) if err != nil { - /*config := cruntime.Config{Type: viper.GetString(containerRuntime), Runner: mRunner, ImageRepository: cc.KubernetesConfig.ImageRepository, KubernetesVersion: cc.KubernetesConfig.KubernetesVersion} - cr, err := cruntime.New(config) - exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner))*/ - exit.WithError("Error starting cluster", err) + exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner)) } + //configureMounts() if err := CacheAndLoadImagesInConfig(); err != nil { out.T(out.FailureType, "Unable to load cached images from config file.") @@ -119,6 +138,47 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str } +// ConfigureRuntimes does what needs to happen to get a runtime going. +func ConfigureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig, kv semver.Version) cruntime.Manager { + co := cruntime.Config{ + Type: viper.GetString(containerRuntime), + Runner: runner, ImageRepository: k8s.ImageRepository, + KubernetesVersion: kv, + } + cr, err := cruntime.New(co) + if err != nil { + exit.WithError("Failed runtime", err) + } + + disableOthers := true + if driver.BareMetal(drvName) { + disableOthers = false + } + + // Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere. + if driver.IsVM(drvName) { + if err := cr.Preload(k8s); err != nil { + switch err.(type) { + case *cruntime.ErrISOFeature: + out.T(out.Tip, "Existing disk is missing new features ({{.error}}). 
To upgrade, run 'minikube delete'", out.V{"error": err}) + default: + glog.Warningf("%s preload failed: %v, falling back to caching images", cr.Name(), err) + } + + if err := machine.CacheImagesForBootstrapper(k8s.ImageRepository, k8s.KubernetesVersion, viper.GetString(cmdcfg.Bootstrapper)); err != nil { + exit.WithError("Failed to cache images", err) + } + } + } + + err = cr.Enable(disableOthers) + if err != nil { + exit.WithError("Failed to enable container runtime", err) + } + + return cr +} + // setupKubeAdm adds any requested files into the VM before Kubernetes is started func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) bootstrapper.Bootstrapper { bs, err := Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg, n) diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index 2dce6350cd..e064f70799 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -112,7 +112,7 @@ func IsMock(name string) bool { // IsVM checks if the driver is a VM func IsVM(name string) bool { - if IsKIC(name) || IsMock(name) || BareMetal(name) { + if IsKIC(name) || BareMetal(name) { return false } return true diff --git a/pkg/minikube/node/config.go b/pkg/minikube/node/config.go index da74bce3db..ef0f66dc12 100644 --- a/pkg/minikube/node/config.go +++ b/pkg/minikube/node/config.go @@ -23,62 +23,17 @@ import ( "path/filepath" "strconv" - "github.com/blang/semver" "github.com/golang/glog" "github.com/spf13/viper" - cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" - "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" - "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/util/lock" ) -// configureRuntimes does what needs to happen to get a runtime going. 
-func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig, kv semver.Version) cruntime.Manager { - co := cruntime.Config{ - Type: viper.GetString(containerRuntime), - Runner: runner, ImageRepository: k8s.ImageRepository, - KubernetesVersion: kv, - } - cr, err := cruntime.New(co) - if err != nil { - exit.WithError("Failed runtime", err) - } - - disableOthers := true - if driver.BareMetal(drvName) { - disableOthers = false - } - - // Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere. - if driver.IsVM(drvName) { - if err := cr.Preload(k8s); err != nil { - switch err.(type) { - case *cruntime.ErrISOFeature: - out.T(out.Tip, "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'", out.V{"error": err}) - default: - glog.Warningf("%s preload failed: %v, falling back to caching images", cr.Name(), err) - } - - if err := machine.CacheImagesForBootstrapper(k8s.ImageRepository, k8s.KubernetesVersion, viper.GetString(cmdcfg.Bootstrapper)); err != nil { - exit.WithError("Failed to cache images", err) - } - } - } - - err = cr.Enable(disableOthers) - if err != nil { - exit.WithError("Failed to enable container runtime", err) - } - - return cr -} - func showVersionInfo(k8sVersion string, cr cruntime.Manager) { version, _ := cr.Version() out.T(cr.Style(), "Preparing Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}} ...", out.V{"k8sVersion": k8sVersion, "runtime": cr.Name(), "runtimeVersion": version}) diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 599d4621aa..0a5e3fc095 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -64,7 +64,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo } // configure the runtime (docker, containerd, crio) - cr := configureRuntimes(runner, driverName, cc.KubernetesConfig, sv) + cr := cluster.ConfigureRuntimes(runner, driverName, 
cc.KubernetesConfig, sv) showVersionInfo(k8sVersion, cr) configureMounts() From e7af223d72f67d8259a24ce1cd2f4c1063bc1a33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20F=20Bj=C3=B6rklund?= Date: Sun, 15 Mar 2020 12:31:14 +0100 Subject: [PATCH 063/668] Strip the version prefix before calling semver --- cmd/minikube/cmd/start.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 44e3d3210c..8b55075e9f 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -822,7 +822,7 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) repository := viper.GetString(imageRepository) mirrorCountry := strings.ToLower(viper.GetString(imageMirrorCountry)) if strings.ToLower(repository) == "auto" || mirrorCountry != "" { - found, autoSelectedRepository, err := selectImageRepository(mirrorCountry, semver.MustParse(k8sVersion)) + found, autoSelectedRepository, err := selectImageRepository(mirrorCountry, semver.MustParse(strings.TrimPrefix(k8sVersion, version.VersionPrefix))) if err != nil { exit.WithError("Failed to check main repository and mirrors for images for images", err) } From dc6b9bae2bc6dfc56908389d675ba85053753c2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20F=20Bj=C3=B6rklund?= Date: Sun, 15 Mar 2020 12:52:35 +0100 Subject: [PATCH 064/668] Add basic unit test for image mirror country --- cmd/minikube/cmd/start_test.go | 44 ++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/cmd/minikube/cmd/start_test.go b/cmd/minikube/cmd/start_test.go index 23675528b6..ef7e4b7403 100644 --- a/cmd/minikube/cmd/start_test.go +++ b/cmd/minikube/cmd/start_test.go @@ -70,6 +70,50 @@ func TestGetKuberneterVersion(t *testing.T) { } } +func TestMirrorCountry(t *testing.T) { + // Set default disk size value in lieu of flag init + viper.SetDefault(humanReadableDiskSize, defaultDiskSize) + + k8sVersion := constants.DefaultKubernetesVersion + 
var tests = []struct { + description string + k8sVersion string + imageRepository string + mirrorCountry string + cfg *cfg.ClusterConfig + }{ + { + description: "image-repository none, image-mirror-country none", + imageRepository: "", + mirrorCountry: "", + }, + { + description: "image-repository auto, image-mirror-country none", + imageRepository: "auto", + mirrorCountry: "", + }, + { + description: "image-repository auto, image-mirror-country china", + imageRepository: "auto", + mirrorCountry: "cn", + }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + cmd := &cobra.Command{} + viper.SetDefault(imageRepository, test.imageRepository) + viper.SetDefault(imageMirrorCountry, test.mirrorCountry) + config, _, err := generateCfgFromFlags(cmd, k8sVersion, "none") + if err != nil { + t.Fatalf("Got unexpected error %v during config generation", err) + } + // the result can still be "", but anyway + _ = config.KubernetesConfig.ImageRepository + }) + } +} + func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) { // Set default disk size value in lieu of flag init viper.SetDefault(humanReadableDiskSize, defaultDiskSize) From 316eef44d1a67f0e140a0d04be079c9adcad6285 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Mon, 16 Mar 2020 10:44:01 -0700 Subject: [PATCH 065/668] Run preloaded images release script in jenkins --- .github/workflows/main.yml | 1 - hack/jenkins/minikube_cross_build_and_upload.sh | 6 +++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 0d01eef0bd..a3572ed151 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -19,7 +19,6 @@ jobs: run : | make minikube-linux-amd64 make e2e-linux-amd64 - make upload-preloaded-images-tar cp -r test/integration/testdata ./out whoami echo github ref $GITHUB_REF diff --git a/hack/jenkins/minikube_cross_build_and_upload.sh b/hack/jenkins/minikube_cross_build_and_upload.sh index 
382adacea7..9738cc08eb 100755 --- a/hack/jenkins/minikube_cross_build_and_upload.sh +++ b/hack/jenkins/minikube_cross_build_and_upload.sh @@ -39,7 +39,11 @@ declare -rx TAG="${ghprbActualCommit}" docker kill $(docker ps -q) || true docker rm $(docker ps -aq) || true -make -j 16 all && failed=$? || failed=$? +make -j 16 all && failed=$? || failed=$? + +echo "Running preloaded images release script..." +make upload-preloaded-images-tar + "out/minikube-$(go env GOOS)-$(go env GOARCH)" version From 577dfa339339d194f55a83b27bedee1ed0c46130 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 16 Mar 2020 11:53:07 -0700 Subject: [PATCH 066/668] it works again --- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 24 ++++++++++--------- .../bootstrapper/bsutil/kubeadm_test.go | 4 ++-- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 4 ++-- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index fa2e120acb..4004ac2e16 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -29,6 +29,7 @@ import ( "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/vmpath" "k8s.io/minikube/pkg/util" ) @@ -37,7 +38,7 @@ import ( const remoteContainerRuntime = "remote" // GenerateKubeadmYAML generates the kubeadm.yaml file -func GenerateKubeadmYAML(cc config.ClusterConfig, r cruntime.Manager, n config.Node) ([]byte, error) { +func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Manager) ([]byte, error) { k8s := cc.KubernetesConfig version, err := util.ParseKubernetesVersion(k8s.KubernetesVersion) if err != nil { @@ -87,19 +88,20 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, r cruntime.Manager, n config.N CertDir: vmpath.GuestKubernetesCertsDir, ServiceCIDR: 
constants.DefaultServiceCIDR, PodSubnet: k8s.ExtraOptions.Get("pod-network-cidr", Kubeadm), - AdvertiseAddress: cp.IP, + AdvertiseAddress: n.IP, APIServerPort: nodePort, KubernetesVersion: k8s.KubernetesVersion, EtcdDataDir: EtcdDataDir(), - ClusterName: k8s.ClusterName, - NodeName: n.Name, - CRISocket: r.SocketPath(), - ImageRepository: k8s.ImageRepository, - ComponentOptions: componentOpts, - FeatureArgs: kubeadmFeatureArgs, - NoTaintMaster: false, // That does not work with k8s 1.12+ - DNSDomain: k8s.DNSDomain, - NodeIP: n.IP, + ClusterName: cc.Name, + //kubeadm uses NodeName as the --hostname-override parameter, so this needs to be the name of the machine + NodeName: driver.MachineName(cc, n), + CRISocket: r.SocketPath(), + ImageRepository: k8s.ImageRepository, + ComponentOptions: componentOpts, + FeatureArgs: kubeadmFeatureArgs, + NoTaintMaster: false, // That does not work with k8s 1.12+ + DNSDomain: k8s.DNSDomain, + NodeIP: n.IP, // NOTE: If set to an specific VM IP, things may break if the IP changes on host restart // For multi-node, we may need to figure out an alternate strategy, like DNS or hosts files ControlPlaneAddress: cp.IP, diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go index 4c366bd96b..806359513a 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go @@ -129,7 +129,7 @@ func TestGenerateKubeadmYAMLDNS(t *testing.T) { cfg.KubernetesConfig.KubernetesVersion = version + ".0" cfg.KubernetesConfig.ClusterName = "kubernetes" - got, err := GenerateKubeadmYAML(cfg, runtime, cfg.Nodes[0]) + got, err := GenerateKubeadmYAML(cfg, cfg.Nodes[0], runtime) if err != nil && !tc.shouldErr { t.Fatalf("got unexpected error generating config: %v", err) } @@ -210,7 +210,7 @@ func TestGenerateKubeadmYAML(t *testing.T) { cfg.KubernetesConfig.KubernetesVersion = version + ".0" cfg.KubernetesConfig.ClusterName = "kubernetes" - got, err := 
GenerateKubeadmYAML(cfg, runtime, cfg.Nodes[0]) + got, err := GenerateKubeadmYAML(cfg, cfg.Nodes[0], runtime) if err != nil && !tc.shouldErr { t.Fatalf("got unexpected error generating config: %v", err) } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 73ec4e4ced..11b2e4d426 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -386,7 +386,7 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC }() // Join the master by specifying its token - joinCmd = fmt.Sprintf("%s --v=10 --node-name=%s", joinCmd, n.Name) + joinCmd = fmt.Sprintf("%s --v=10 --node-name=%s", joinCmd, driver.MachineName(cc, n)) out, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", joinCmd)) if err != nil { return errors.Wrapf(err, "cmd failed: %s\n%+v\n", joinCmd, out) @@ -473,7 +473,7 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { // UpdateNode updates a node. 
func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cruntime.Manager) error { - kubeadmCfg, err := bsutil.GenerateKubeadmYAML(cfg, r, n) + kubeadmCfg, err := bsutil.GenerateKubeadmYAML(cfg, n, r) if err != nil { return errors.Wrap(err, "generating kubeadm cfg") } From c1c26538797f67a4437f8dd6a572260676a2a8e5 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 16 Mar 2020 12:25:59 -0700 Subject: [PATCH 067/668] fix unit tests --- .../bootstrapper/bsutil/ktmpl/v1beta2.go | 2 +- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 1 - .../bootstrapper/bsutil/kubeadm_test.go | 20 +++++++++---------- .../testdata/v1.11/containerd-api-port.yaml | 2 +- .../v1.11/containerd-pod-network-cidr.yaml | 2 +- .../bsutil/testdata/v1.11/containerd.yaml | 2 +- .../testdata/v1.11/crio-options-gates.yaml | 2 +- .../bsutil/testdata/v1.11/crio.yaml | 2 +- .../bsutil/testdata/v1.11/default.yaml | 2 +- .../testdata/v1.11/image-repository.yaml | 2 +- .../bsutil/testdata/v1.11/options.yaml | 2 +- .../testdata/v1.12/containerd-api-port.yaml | 4 ++-- .../v1.12/containerd-pod-network-cidr.yaml | 4 ++-- .../bsutil/testdata/v1.12/containerd.yaml | 4 ++-- .../testdata/v1.12/crio-options-gates.yaml | 4 ++-- .../bsutil/testdata/v1.12/crio.yaml | 4 ++-- .../bsutil/testdata/v1.12/default.yaml | 4 ++-- .../bsutil/testdata/v1.12/dns.yaml | 4 ++-- .../testdata/v1.12/image-repository.yaml | 4 ++-- .../bsutil/testdata/v1.12/options.yaml | 4 ++-- .../testdata/v1.13/containerd-api-port.yaml | 4 ++-- .../v1.13/containerd-pod-network-cidr.yaml | 4 ++-- .../bsutil/testdata/v1.13/containerd.yaml | 4 ++-- .../testdata/v1.13/crio-options-gates.yaml | 4 ++-- .../bsutil/testdata/v1.13/crio.yaml | 4 ++-- .../bsutil/testdata/v1.13/default.yaml | 4 ++-- .../bsutil/testdata/v1.13/dns.yaml | 4 ++-- .../testdata/v1.13/image-repository.yaml | 4 ++-- .../bsutil/testdata/v1.13/options.yaml | 4 ++-- .../testdata/v1.14/containerd-api-port.yaml | 4 ++-- .../v1.14/containerd-pod-network-cidr.yaml | 4 ++-- 
.../bsutil/testdata/v1.14/containerd.yaml | 4 ++-- .../testdata/v1.14/crio-options-gates.yaml | 4 ++-- .../bsutil/testdata/v1.14/crio.yaml | 4 ++-- .../bsutil/testdata/v1.14/default.yaml | 4 ++-- .../bsutil/testdata/v1.14/dns.yaml | 4 ++-- .../testdata/v1.14/image-repository.yaml | 4 ++-- .../bsutil/testdata/v1.14/options.yaml | 4 ++-- .../testdata/v1.15/containerd-api-port.yaml | 4 ++-- .../v1.15/containerd-pod-network-cidr.yaml | 4 ++-- .../bsutil/testdata/v1.15/containerd.yaml | 4 ++-- .../testdata/v1.15/crio-options-gates.yaml | 4 ++-- .../bsutil/testdata/v1.15/crio.yaml | 4 ++-- .../bsutil/testdata/v1.15/default.yaml | 4 ++-- .../bsutil/testdata/v1.15/dns.yaml | 4 ++-- .../testdata/v1.15/image-repository.yaml | 4 ++-- .../bsutil/testdata/v1.15/options.yaml | 4 ++-- .../testdata/v1.16/containerd-api-port.yaml | 4 ++-- .../v1.16/containerd-pod-network-cidr.yaml | 4 ++-- .../bsutil/testdata/v1.16/containerd.yaml | 4 ++-- .../testdata/v1.16/crio-options-gates.yaml | 4 ++-- .../bsutil/testdata/v1.16/crio.yaml | 4 ++-- .../bsutil/testdata/v1.16/default.yaml | 4 ++-- .../bsutil/testdata/v1.16/dns.yaml | 4 ++-- .../testdata/v1.16/image-repository.yaml | 4 ++-- .../bsutil/testdata/v1.16/options.yaml | 4 ++-- .../testdata/v1.17/containerd-api-port.yaml | 4 ++-- .../v1.17/containerd-pod-network-cidr.yaml | 4 ++-- .../bsutil/testdata/v1.17/containerd.yaml | 4 ++-- .../testdata/v1.17/crio-options-gates.yaml | 4 ++-- .../bsutil/testdata/v1.17/crio.yaml | 4 ++-- .../bsutil/testdata/v1.17/default.yaml | 4 ++-- .../bsutil/testdata/v1.17/dns.yaml | 4 ++-- .../testdata/v1.17/image-repository.yaml | 4 ++-- .../bsutil/testdata/v1.17/options.yaml | 4 ++-- .../testdata/v1.18/containerd-api-port.yaml | 4 ++-- .../v1.18/containerd-pod-network-cidr.yaml | 4 ++-- .../bsutil/testdata/v1.18/containerd.yaml | 4 ++-- .../testdata/v1.18/crio-options-gates.yaml | 4 ++-- .../bsutil/testdata/v1.18/crio.yaml | 4 ++-- .../bsutil/testdata/v1.18/default.yaml | 4 ++-- 
.../bsutil/testdata/v1.18/dns.yaml | 4 ++-- .../testdata/v1.18/image-repository.yaml | 4 ++-- .../bsutil/testdata/v1.18/options.yaml | 4 ++-- .../testdata/v1.19/containerd-api-port.yaml | 4 ++-- .../v1.19/containerd-pod-network-cidr.yaml | 4 ++-- .../bsutil/testdata/v1.19/containerd.yaml | 4 ++-- .../testdata/v1.19/crio-options-gates.yaml | 4 ++-- .../bsutil/testdata/v1.19/crio.yaml | 4 ++-- .../bsutil/testdata/v1.19/default.yaml | 4 ++-- .../bsutil/testdata/v1.19/dns.yaml | 4 ++-- .../testdata/v1.19/image-repository.yaml | 4 ++-- .../bsutil/testdata/v1.19/options.yaml | 4 ++-- pkg/minikube/node/node.go | 5 ++--- 84 files changed, 165 insertions(+), 167 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go index c4718f9c98..c00835e8e7 100644 --- a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go +++ b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go @@ -56,7 +56,7 @@ kind: ClusterConfiguration {{range $i, $val := .FeatureArgs}}{{$i}}: {{$val}} {{end -}}{{end -}} certificatesDir: {{.CertDir}} -clusterName: kubernetes +clusterName: mk controlPlaneEndpoint: {{.ControlPlaneAddress}}:{{.APIServerPort}} controllerManager: {} dns: diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 4004ac2e16..8b675ae644 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -129,7 +129,6 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana if err := configTmpl.Execute(&b, opts); err != nil { return nil, err } - fmt.Printf("%s OPTS=%+v\n", n.Name, opts) glog.Infof("kubeadm config:\n%s\n", b.String()) return b.Bytes(), nil } diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go index 806359513a..a2b53c3ea1 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go +++ 
b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go @@ -108,7 +108,7 @@ func TestGenerateKubeadmYAMLDNS(t *testing.T) { shouldErr bool cfg config.ClusterConfig }{ - {"dns", "docker", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{DNSDomain: "1.1.1.1"}}}, + {"dns", "docker", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{DNSDomain: "1.1.1.1"}}}, } for _, version := range versions { for _, tc := range tests { @@ -174,15 +174,15 @@ func TestGenerateKubeadmYAML(t *testing.T) { shouldErr bool cfg config.ClusterConfig }{ - {"default", "docker", false, config.ClusterConfig{}}, - {"containerd", "containerd", false, config.ClusterConfig{}}, - {"crio", "crio", false, config.ClusterConfig{}}, - {"options", "docker", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts}}}, - {"crio-options-gates", "crio", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts, FeatureGates: "a=b"}}}, - {"unknown-component", "docker", true, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: config.ExtraOptionSlice{config.ExtraOption{Component: "not-a-real-component", Key: "killswitch", Value: "true"}}}}}, - {"containerd-api-port", "containerd", false, config.ClusterConfig{Nodes: []config.Node{{Port: 12345}}}}, - {"containerd-pod-network-cidr", "containerd", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOptsPodCidr}}}, - {"image-repository", "docker", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ImageRepository: "test/repo"}}}, + {"default", "docker", false, config.ClusterConfig{Name: "mk"}}, + {"containerd", "containerd", false, config.ClusterConfig{Name: "mk"}}, + {"crio", "crio", false, config.ClusterConfig{Name: "mk"}}, + {"options", "docker", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts}}}, + 
{"crio-options-gates", "crio", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts, FeatureGates: "a=b"}}}, + {"unknown-component", "docker", true, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: config.ExtraOptionSlice{config.ExtraOption{Component: "not-a-real-component", Key: "killswitch", Value: "true"}}}}}, + {"containerd-api-port", "containerd", false, config.ClusterConfig{Name: "mk", Nodes: []config.Node{{Port: 12345}}}}, + {"containerd-pod-network-cidr", "containerd", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOptsPodCidr}}}, + {"image-repository", "docker", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ImageRepository: "test/repo"}}}, } for _, version := range versions { for _, tc := range tests { diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-api-port.yaml index 7d94020c6f..ae79c8aa7a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-api-port.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 12345 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-pod-network-cidr.yaml index f66eec734e..a8ce3c8dc7 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-pod-network-cidr.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + 
controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd.yaml index f66eec734e..a8ce3c8dc7 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio-options-gates.yaml index 30b1986325..1a4d370e84 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio-options-gates.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio.yaml index 4693643125..e179fbf4e3 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/default.yaml index 5c2861101e..68429da7bc 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/default.yaml 
+++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/default.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/image-repository.yaml index 7d383865f8..651706493c 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/image-repository.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/options.yaml index 26fbfead4b..5b192e1cfd 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/options.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-api-port.yaml index ba34af30df..adf230658d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-api-port.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: 
"Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:12345 +controlPlaneEndpoint: 1.1.1.1:12345 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-pod-network-cidr.yaml index 0d821692e5..300ee2825f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-pod-network-cidr.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd.yaml index 8ac889649f..9866d944d9 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: 
/var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio-options-gates.yaml index 5fb536a9f5..c8e2fbb46a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio-options-gates.yaml @@ -30,9 +30,9 @@ schedulerExtraArgs: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio.yaml index a2e258468b..834021df94 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/default.yaml index 6db4345453..3c8b8b41a8 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/default.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/dns.yaml index e0b60901ab..d6154f4ecd 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/dns.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/image-repository.yaml index 595bd0c94c..e9dd51d811 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/image-repository.yaml @@ -23,9 +23,9 @@ imageRepository: test/repo apiServerExtraArgs: enable-admission-plugins: 
"Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/options.yaml index 04237f4db1..a49db3c29f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/options.yaml @@ -27,9 +27,9 @@ controllerManagerExtraArgs: schedulerExtraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-api-port.yaml index e4e9c885b2..8d90c3e212 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-api-port.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:12345 +controlPlaneEndpoint: 1.1.1.1:12345 etcd: local: dataDir: 
/var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-pod-network-cidr.yaml index ee58cf2201..1788a1adb8 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-pod-network-cidr.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd.yaml index a719307679..770f46cc0f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio-options-gates.yaml index be69a16ec7..326912679e 100644 
--- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio-options-gates.yaml @@ -30,9 +30,9 @@ schedulerExtraArgs: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio.yaml index c195ffc2ba..08646f704f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/default.yaml index f7fc9b5199..25d166e0dc 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/default.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes 
+clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/dns.yaml index d9bb198b8f..eb057faf76 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/dns.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/image-repository.yaml index 0a1e7bab7b..d828d72006 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/image-repository.yaml @@ -23,9 +23,9 @@ imageRepository: test/repo apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/options.yaml 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/options.yaml index 3aa0b74754..5fe5d326bc 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/options.yaml @@ -27,9 +27,9 @@ controllerManagerExtraArgs: schedulerExtraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml index 741ad12afb..64efcf3938 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-pod-network-cidr.yaml index 54abf05793..6ef28c1c8d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" 
certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml index df4740aaeb..97b4065593 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml index 513e1f803a..cf8a3e4728 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml index 1053c5c42f..3ef27c9b9f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: 
"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml index 117c9070bf..746eb9fb7d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml index 67c0df83a3..a4e2567756 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml index c720ebac42..aedd2a9047 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml index 35aa4982b2..81980c953d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml index 3048061426..4e6bbead95 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 dns: type: CoreDNS etcd: diff --git 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-pod-network-cidr.yaml index 3a180ccafe..9a9a5c60f6 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml index 75a083a4ce..cacacc7e43 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml index 587faaf4de..c78edc0119 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: 
/var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml index 680b24fe8d..47db96b5c2 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml index 4ac5254431..d68ef1b1f2 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml index 2403f96063..1e79a74a1c 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: 
"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml index 9e3d3e5088..f11df32d8b 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml index cf7d8c2964..d277ac59e6 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml index 2f1d050a40..758f7b2f62 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml +++ 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-pod-network-cidr.yaml index ad749f03cc..15802a1859 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml index ddc2d7cf74..0876e3bdde 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk 
+controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml index adbc88e1d7..6ca53c67e9 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml index d401b50e81..0b87277ba2 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml index bf4ee2a96a..765a4b2398 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: 
kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml index 9b464ae194..1105d6fc3c 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml index 140db5ca32..5b78859ead 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml index c7623c0e0f..cb4d159683 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" 
certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml index f10bad3678..240d23984b 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-pod-network-cidr.yaml index a8b4286a19..0ce3766fdb 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml index 4eb28ddba9..c5ecd93bc5 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml index 78f465b4ab..cae9608f04 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml index 179f18da35..d86d853915 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml index b223f2a7c5..eef9a6c7ae 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml index 5e8102e6ed..86f4d03bd9 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml index 676e6de52b..7b215a01b7 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: 
"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml index 608569fb84..dba5ff15d2 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml index af5af1f022..77acbb9ed7 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-pod-network-cidr.yaml index b1f3d8214f..5abe34481e 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml index 317e578be8..a53b109047 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml index 402ef57a02..0235b34b1f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS 
diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml index 74a7bd1536..d2907dcbc3 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml index 7caa19fa2b..c921f54cdf 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml index 80a20ba800..bd13212add 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: 
"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml index 7c1b7989db..ef954470c0 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml index 6205c948bc..743cbb4e2d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml index ba8872e7ce..1f5a6f8df8 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml index 6b1a12c922..4cbe5b28d8 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml index c92bd1a314..c2b2c89a2b 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: 
/var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml index 405c3354d2..21f5c1080a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml index c1dd4916df..eb0abc25b0 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml index 9f76b719a7..692a5925a3 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: 
"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml index bc0db1cb07..e384cdda1f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml index 14cc7bb8b6..7b50680f2c 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml index 7b60865e15..a134555f6a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 41d518f3f2..7458ff80a3 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -27,9 +27,8 @@ import ( // TODO: Share these between cluster and node packages const ( - containerRuntime = "container-runtime" - mountString = "mount-string" - createMount = "mount" + mountString = "mount-string" + createMount = "mount" ) // Add adds a new node config to an existing cluster. From 376111bae8c98b5ced850ed8e4745143bcf3e7d9 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 16 Mar 2020 12:38:59 -0700 Subject: [PATCH 068/668] fix unit tests pt 2 --- pkg/minikube/bootstrapper/bsutil/kubelet_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go index 3019ee1f52..1af1dc525c 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go @@ -37,6 +37,7 @@ func TestGenerateKubeletConfig(t *testing.T) { { description: "old docker", cfg: config.ClusterConfig{ + Name: "minikube", KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.OldestKubernetesVersion, ContainerRuntime: "docker", @@ -62,6 +63,7 @@ ExecStart=/var/lib/minikube/binaries/v1.11.10/kubelet --allow-privileged=true -- { description: "newest cri runtime", cfg: config.ClusterConfig{ + Name: "minikube", KubernetesConfig: config.KubernetesConfig{ 
KubernetesVersion: constants.NewestKubernetesVersion, ContainerRuntime: "cri-o", @@ -87,6 +89,7 @@ ExecStart=/var/lib/minikube/binaries/v1.18.0-beta.2/kubelet --authorization-mode { description: "default containerd runtime", cfg: config.ClusterConfig{ + Name: "minikube", KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.DefaultKubernetesVersion, ContainerRuntime: "containerd", @@ -112,6 +115,7 @@ ExecStart=/var/lib/minikube/binaries/v1.17.3/kubelet --authorization-mode=Webhoo { description: "default containerd runtime with IP override", cfg: config.ClusterConfig{ + Name: "minikube", KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.DefaultKubernetesVersion, ContainerRuntime: "containerd", @@ -144,6 +148,7 @@ ExecStart=/var/lib/minikube/binaries/v1.17.3/kubelet --authorization-mode=Webhoo { description: "docker with custom image repository", cfg: config.ClusterConfig{ + Name: "minikube", KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.DefaultKubernetesVersion, ContainerRuntime: "docker", From f99d335fed4755e512e22da115a80121b3c45fbe Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 16 Mar 2020 15:36:05 -0700 Subject: [PATCH 069/668] fix docker driver --- cmd/minikube/cmd/start.go | 25 +++++++++++++------------ pkg/minikube/driver/driver.go | 8 ++++++++ pkg/provision/provision.go | 3 ++- 3 files changed, 23 insertions(+), 13 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 3cfdc19f39..6b063a88ba 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -357,18 +357,19 @@ func runStart(cmd *cobra.Command, args []string) { if numNodes > 1 { if driver.BareMetal(driverName) { out.T(out.Meh, "The none driver is not compatible with multi-node clusters.") - } - for i := 1; i < numNodes; i++ { - nodeName := fmt.Sprintf("m%02d", i+1) - n := config.Node{ - Name: nodeName, - Worker: true, - ControlPlane: false, - KubernetesVersion: 
cc.KubernetesConfig.KubernetesVersion, - } - err := node.Add(&cc, n) - if err != nil { - exit.WithError("adding node", err) + } else { + for i := 1; i < numNodes; i++ { + nodeName := fmt.Sprintf("m%02d", i+1) + n := config.Node{ + Name: nodeName, + Worker: true, + ControlPlane: false, + KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, + } + err := node.Add(&cc, n) + if err != nil { + exit.WithError("adding node", err) + } } } } diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index e064f70799..170b99e71a 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -236,3 +236,11 @@ func MachineName(cc config.ClusterConfig, n config.Node) string { } return fmt.Sprintf("%s-%s", cc.Name, n.Name) } + +// ClusterNameFromMachine retrieves the cluster name embedded in the machine name +func ClusterNameFromMachine(name string) string { + if strings.Contains(name, "-") { + return strings.Split(name, "-")[0] + } + return name +} diff --git a/pkg/provision/provision.go b/pkg/provision/provision.go index ff5f08fef8..fd84405266 100644 --- a/pkg/provision/provision.go +++ b/pkg/provision/provision.go @@ -39,6 +39,7 @@ import ( "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/sshutil" ) @@ -195,7 +196,7 @@ func setRemoteAuthOptions(p provision.Provisioner) auth.Options { } func setContainerRuntimeOptions(name string, p miniProvisioner) error { - cluster := strings.Split(name, "-")[0] + cluster := driver.ClusterNameFromMachine(name) c, err := config.Load(cluster) if err != nil { return errors.Wrap(err, "getting cluster config") From c9c597c2e1f23a1fedec6bb7a0cd93b1cbd8d112 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Mon, 16 Mar 2020 17:48:00 -0700 Subject: [PATCH 070/668] get last 30 releases of k8s from github --- go.mod | 2 + go.sum | 2 + .../minikube_cross_build_and_upload.sh | 4 -- 
hack/preload-images/kubernetes.go | 40 +++++++++++++++++++ hack/preload-images/preload_images.go | 11 +++-- 5 files changed, 49 insertions(+), 10 deletions(-) create mode 100644 hack/preload-images/kubernetes.go diff --git a/go.mod b/go.mod index 6777081f84..b1861de795 100644 --- a/go.mod +++ b/go.mod @@ -27,6 +27,8 @@ require ( github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/google/go-cmp v0.3.1 github.com/google/go-containerregistry v0.0.0-20200131185320-aec8da010de2 + github.com/google/go-github v17.0.0+incompatible + github.com/google/go-github/v29 v29.0.3 // indirect github.com/googleapis/gnostic v0.3.0 // indirect github.com/hashicorp/go-getter v1.4.0 github.com/hashicorp/go-retryablehttp v0.5.4 diff --git a/go.sum b/go.sum index 0cd403ed47..112a60917c 100644 --- a/go.sum +++ b/go.sum @@ -335,6 +335,8 @@ github.com/google/go-containerregistry v0.0.0-20200131185320-aec8da010de2 h1:/z0 github.com/google/go-containerregistry v0.0.0-20200131185320-aec8da010de2/go.mod h1:Wtl/v6YdQxv397EREtzwgd9+Ud7Q5D8XMbi3Zazgkrs= github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-github/v29 v29.0.3 h1:IktKCTwU//aFHnpA+2SLIi7Oo9uhAzgsdZNbcAqhgdc= +github.com/google/go-github/v29 v29.0.3/go.mod h1:CHKiKKPHJ0REzfwc14QMklvtHwCveD0PxlMjLlzAM5E= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= diff --git a/hack/jenkins/minikube_cross_build_and_upload.sh b/hack/jenkins/minikube_cross_build_and_upload.sh index 9738cc08eb..ff9dc97092 100755 --- a/hack/jenkins/minikube_cross_build_and_upload.sh +++ b/hack/jenkins/minikube_cross_build_and_upload.sh @@ 
-41,10 +41,6 @@ docker kill $(docker ps -q) || true docker rm $(docker ps -aq) || true make -j 16 all && failed=$? || failed=$? -echo "Running preloaded images release script..." -make upload-preloaded-images-tar - - "out/minikube-$(go env GOOS)-$(go env GOARCH)" version gsutil cp "gs://${bucket}/logs/index.html" \ diff --git a/hack/preload-images/kubernetes.go b/hack/preload-images/kubernetes.go new file mode 100644 index 0000000000..1114d39e12 --- /dev/null +++ b/hack/preload-images/kubernetes.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "context" + + "github.com/golang/glog" + "github.com/google/go-github/github" +) + +// RecentK8sVersions returns the most recent k8s version, usually around 30 +func RecentK8sVersions() ([]string, error) { + client := github.NewClient(nil) + k8s := "kubernetes" + list, _, err := client.Repositories.ListReleases(context.Background(), k8s, k8s, &github.ListOptions{}) + if err != nil { + return nil, err + } + var releases []string + for _, r := range list { + releases = append(releases, r.GetTagName()) + } + glog.Infof("Got releases: %v", releases) + return releases, nil +} diff --git a/hack/preload-images/preload_images.go b/hack/preload-images/preload_images.go index 17a66806a9..8f25e58040 100644 --- a/hack/preload-images/preload_images.go +++ b/hack/preload-images/preload_images.go @@ -22,7 +22,6 @@ import ( "os/exec" "strings" - "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/exit" ) @@ -36,17 +35,17 @@ var ( dockerStorageDriver = "overlay2" preloadedTarballVersion = "v1" containerRuntimes = []string{"docker"} - kubernetesVersions = []string{ - constants.OldestKubernetesVersion, - constants.DefaultKubernetesVersion, - constants.NewestKubernetesVersion, - } ) func main() { if err := verifyDockerStorage(); err != nil { exit.WithError("Docker storage type is incompatible: %v\n", err) } + kubernetesVersions, err := RecentK8sVersions() + if err != nil { + exit.WithError("Unable to get recent k8s versions: %v\n", err) + } + for _, kubernetesVersion := range kubernetesVersions { for _, cr := range containerRuntimes { tf := tarballFilename(kubernetesVersion, cr) From 0acb06bd34bf7ccdee14724329cfddcc443b5f87 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Mon, 16 Mar 2020 17:51:41 -0700 Subject: [PATCH 071/668] add flag for kubernetes version --- go.sum | 5 ++++ hack/preload-images/preload_images.go | 34 +++++++++++++++++++-------- 2 files changed, 29 insertions(+), 10 
deletions(-) diff --git a/go.sum b/go.sum index 112a60917c..dfeb89780e 100644 --- a/go.sum +++ b/go.sum @@ -68,6 +68,7 @@ github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYU github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM= github.com/aws/aws-sdk-go v1.15.78 h1:LaXy6lWR0YK7LKyuU0QWy2ws/LWTPfYV/UgfiBu4tvY= @@ -93,6 +94,7 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/c4milo/gotoolkit v0.0.0-20170318115440-bcc06269efa9 h1:+ziP/wVJWuAORkjv7386TRidVKY57X0bXBZFMeFlW+U= @@ -260,6 +262,7 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh github.com/go-openapi/validate v0.18.0/go.mod 
h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-ozzo/ozzo-validation v3.5.0+incompatible h1:sUy/in/P6askYr16XJgTKq/0SZhiWsdg4WZGaLsGQkM= github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= @@ -393,7 +396,9 @@ github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible h1:ysqc8k973k1lLJ4BOOHAkx14K2nt4cLjsIm+hwWDZDE= github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= +github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6 h1:oJ/NLadJn5HoxvonA6VxG31lg0d6XOURNA09BTtM4fY= github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= github.com/hooklift/assert v0.0.0-20170704181755-9d1defd6d214 h1:WgfvpuKg42WVLkxNwzfFraXkTXPK36bMqXvMFN67clI= github.com/hooklift/assert v0.0.0-20170704181755-9d1defd6d214/go.mod h1:kj6hFWqfwSjFjLnYW5PK1DoxZ4O0uapwHRmd9jhln4E= diff --git a/hack/preload-images/preload_images.go b/hack/preload-images/preload_images.go index 8f25e58040..7a9956d9c0 100644 --- a/hack/preload-images/preload_images.go +++ b/hack/preload-images/preload_images.go @@ -18,6 +18,7 @@ package main import ( "bytes" + "flag" "fmt" 
"os/exec" "strings" @@ -35,30 +36,43 @@ var ( dockerStorageDriver = "overlay2" preloadedTarballVersion = "v1" containerRuntimes = []string{"docker"} + k8sVersion string + k8sVersions []string ) +func init() { + flag.StringVar(&k8sVersion, "kubernetes-version", "", "desired kubernetes version, for example `v1.17.2`") + flag.Parse() + if k8sVersion != "" { + k8sVersions = append(k8sVersions, k8sVersion) + } +} + func main() { if err := verifyDockerStorage(); err != nil { exit.WithError("Docker storage type is incompatible: %v\n", err) } - kubernetesVersions, err := RecentK8sVersions() - if err != nil { - exit.WithError("Unable to get recent k8s versions: %v\n", err) + if k8sVersions == nil { + var err error + k8sVersions, err = RecentK8sVersions() + if err != nil { + exit.WithError("Unable to get recent k8s versions: %v\n", err) + } } - for _, kubernetesVersion := range kubernetesVersions { + for _, kv := range k8sVersions { for _, cr := range containerRuntimes { - tf := tarballFilename(kubernetesVersion, cr) + tf := tarballFilename(kv, cr) if tarballExists(tf) { - fmt.Printf("A preloaded tarball for k8s version %s already exists, skipping generation.\n", kubernetesVersion) + fmt.Printf("A preloaded tarball for k8s version %s already exists, skipping generation.\n", kv) continue } - fmt.Printf("A preloaded tarball for k8s version %s doesn't exist, generating now...\n", kubernetesVersion) - if err := generateTarball(kubernetesVersion, tf); err != nil { - exit.WithError(fmt.Sprintf("generating tarball for k8s version %s with %s", kubernetesVersion, cr), err) + fmt.Printf("A preloaded tarball for k8s version %s doesn't exist, generating now...\n", kv) + if err := generateTarball(kv, tf); err != nil { + exit.WithError(fmt.Sprintf("generating tarball for k8s version %s with %s", kv, cr), err) } if err := uploadTarball(tf); err != nil { - exit.WithError(fmt.Sprintf("uploading tarball for k8s version %s with %s", kubernetesVersion, cr), err) + 
exit.WithError(fmt.Sprintf("uploading tarball for k8s version %s with %s", kv, cr), err) } } } From e053e4f235a7dfc88b0964372b9bc00b14447496 Mon Sep 17 00:00:00 2001 From: vikkyomkar Date: Tue, 17 Mar 2020 13:21:05 +0530 Subject: [PATCH 072/668] Add --vm flag for users who want to autoselect only VM's --- cmd/minikube/cmd/start.go | 3 ++- pkg/minikube/driver/driver.go | 27 ++++++++++++++++++++------- pkg/minikube/driver/driver_test.go | 2 +- pkg/minikube/registry/global.go | 3 --- 4 files changed, 23 insertions(+), 12 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 44e3d3210c..fea525383b 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -194,6 +194,7 @@ func initDriverFlags() { startCmd.Flags().String("driver", "", fmt.Sprintf("Driver is one of: %v (defaults to auto-detect)", driver.DisplaySupportedDrivers())) startCmd.Flags().String("vm-driver", "", "DEPRECATED, use `driver` instead.") startCmd.Flags().Bool(disableDriverMounts, false, "Disables the filesystem mounts provided by the hypervisors") + startCmd.Flags().Bool("vm", false, "Filter to use only VM Drivers") // kvm2 startCmd.Flags().String(kvmNetwork, "default", "The KVM network name. (kvm2 driver only)") @@ -465,7 +466,7 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState { return ds } - pick, alts := driver.Suggest(driver.Choices()) + pick, alts := driver.Suggest(driver.Choices(viper.GetBool("vm"))) if pick.Name == "" { exit.WithCodeT(exit.Config, "Unable to determine a default driver to use. 
Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/") } diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index 2dce6350cd..e5ea05b00a 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -164,14 +164,27 @@ func FlagDefaults(name string) FlagHints { } // Choices returns a list of drivers which are possible on this system -func Choices() []registry.DriverState { +func Choices(vm bool) []registry.DriverState { options := registry.Available() - - // Descending priority for predictability and appearance - sort.Slice(options, func(i, j int) bool { - return options[i].Priority > options[j].Priority - }) - return options + if vm { + var vmOptions []registry.DriverState + for _, ds := range options { + if IsVM(ds.Name) { + vmOptions = append(vmOptions,ds) + } + } + // Descending priority for predictability and appearance + sort.Slice(vmOptions, func(i, j int) bool { + return vmOptions[i].Priority > vmOptions[j].Priority + }) + return vmOptions + }else { + // Descending priority for predictability and appearance + sort.Slice(options, func(i, j int) bool { + return options[i].Priority > options[j].Priority + }) + return options + } } // Suggest returns a suggested driver from a set of options diff --git a/pkg/minikube/driver/driver_test.go b/pkg/minikube/driver/driver_test.go index 8f9be829ad..5d0bfd4009 100644 --- a/pkg/minikube/driver/driver_test.go +++ b/pkg/minikube/driver/driver_test.go @@ -162,7 +162,7 @@ func TestSuggest(t *testing.T) { } } - got := Choices() + got := Choices(false) gotNames := []string{} for _, c := range got { gotNames = append(gotNames, c.Name) diff --git a/pkg/minikube/registry/global.go b/pkg/minikube/registry/global.go index 301f61cb9f..d53168b48d 100644 --- a/pkg/minikube/registry/global.go +++ b/pkg/minikube/registry/global.go @@ -1,12 +1,9 @@ /* Copyright 2018 The Kubernetes Authors All rights reserved. 
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. From 3216a03c00ee60b93f1102e5ad62a0cb31b1a150 Mon Sep 17 00:00:00 2001 From: vikkyomkar Date: Tue, 17 Mar 2020 13:22:24 +0530 Subject: [PATCH 073/668] Add --vm flag for users who want to autoselect only VM's --- pkg/minikube/registry/global.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/minikube/registry/global.go b/pkg/minikube/registry/global.go index d53168b48d..301f61cb9f 100644 --- a/pkg/minikube/registry/global.go +++ b/pkg/minikube/registry/global.go @@ -1,9 +1,12 @@ /* Copyright 2018 The Kubernetes Authors All rights reserved. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
From 84939da8e3c27407dbf6c518682d636f6d21db24 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 17 Mar 2020 00:58:46 -0700 Subject: [PATCH 074/668] fix docker driver again --- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 8 ++-- pkg/minikube/cluster/setup.go | 42 +++++++++++++++----- 2 files changed, 37 insertions(+), 13 deletions(-) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 11b2e4d426..6bd2403b71 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -202,16 +202,16 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { } - if err = k.SetupNode(cfg); err != nil { - return errors.Wrap(err, "setting up node") - } - c := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), bsutil.KubeadmYamlPath, extraFlags, strings.Join(ignore, ","))) rr, err := k.c.RunCmd(c) if err != nil { return errors.Wrapf(err, "init failed. 
output: %q", rr.Output()) } + if err = k.SetupNode(cfg); err != nil { + return errors.Wrap(err, "setting up node") + } + if err := bsutil.AdjustResourceLimits(k.c); err != nil { glog.Warningf("unable to adjust resource limits: %v", err) } diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index e67a4e6ca3..513154ccce 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -29,10 +29,12 @@ import ( "github.com/docker/machine/libmachine" "github.com/docker/machine/libmachine/host" "github.com/golang/glog" + "github.com/pkg/errors" "github.com/spf13/viper" "golang.org/x/sync/errgroup" cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/addons" + "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/minikube/bootstrapper" "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/command" @@ -198,18 +200,14 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) return bs } -func setupKubeconfig(h *host.Host, c *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { - addr, err := h.Driver.GetURL() +func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { + addr, err := apiServerURL(*h, *cc, *n) if err != nil { - exit.WithError("Failed to get driver URL", err) - } - if !driver.IsKIC(h.DriverName) { - addr = strings.Replace(addr, "tcp://", "https://", -1) - addr = strings.Replace(addr, ":2376", ":"+strconv.Itoa(n.Port), -1) + exit.WithError("Failed to get API Server URL", err) } - if c.KubernetesConfig.APIServerName != constants.APIServerName { - addr = strings.Replace(addr, n.IP, c.KubernetesConfig.APIServerName, -1) + if cc.KubernetesConfig.APIServerName != constants.APIServerName { + addr = strings.Replace(addr, n.IP, cc.KubernetesConfig.APIServerName, -1) } kcs := &kubeconfig.Settings{ ClusterName: clusterName, @@ -228,6 +226,31 @@ func 
setupKubeconfig(h *host.Host, c *config.ClusterConfig, n *config.Node, clus return kcs, nil } +func apiServerURL(h host.Host, cc config.ClusterConfig, n config.Node) (string, error) { + hostname := "" + port := n.Port + var err error + if driver.IsKIC(h.DriverName) { + // for kic drivers we use 127.0.0.1 instead of node IP, + // because of Docker on MacOs limitations for reaching to container's IP. + hostname = oci.DefaultBindIPV4 + port, err = oci.ForwardedPort(h.DriverName, h.Name, port) + if err != nil { + return "", errors.Wrap(err, "host port binding") + } + } else { + hostname, err = h.Driver.GetIP() + if err != nil { + return "", errors.Wrap(err, "get ip") + } + } + + if cc.KubernetesConfig.APIServerName != constants.APIServerName { + hostname = cc.KubernetesConfig.APIServerName + } + return fmt.Sprintf("https://" + net.JoinHostPort(hostname, strconv.Itoa(port))), nil +} + // StartMachine starts a VM func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { m, err := machine.NewAPIClient() @@ -248,6 +271,7 @@ func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner command. out.ErrT(out.FailureType, "Failed to set NO_PROXY Env. 
Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip}) } + // Save IP to config file for subsequent use node.IP = ip err = config.SaveNode(cfg, node) if err != nil { From 37b23cfd6d02dd9f461f9c66054cc01d1759cebc Mon Sep 17 00:00:00 2001 From: vikkyomkar Date: Tue, 17 Mar 2020 13:49:22 +0530 Subject: [PATCH 075/668] Add --vm flag for users who want to autoselect only VM's --- pkg/minikube/driver/driver.go | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index e5ea05b00a..b790af5c9b 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -165,26 +165,22 @@ func FlagDefaults(name string) FlagHints { // Choices returns a list of drivers which are possible on this system func Choices(vm bool) []registry.DriverState { + var drivers []registry.DriverState options := registry.Available() if vm { - var vmOptions []registry.DriverState for _, ds := range options { if IsVM(ds.Name) { - vmOptions = append(vmOptions,ds) + drivers = append(drivers, ds) } } - // Descending priority for predictability and appearance - sort.Slice(vmOptions, func(i, j int) bool { - return vmOptions[i].Priority > vmOptions[j].Priority - }) - return vmOptions - }else { - // Descending priority for predictability and appearance - sort.Slice(options, func(i, j int) bool { - return options[i].Priority > options[j].Priority - }) - return options + } else { + drivers = options } + // Descending priority for predictability and appearance + sort.Slice(options, func(i, j int) bool { + return options[i].Priority > options[j].Priority + }) + return drivers } // Suggest returns a suggested driver from a set of options From ce5bf6c0732fe546f3c5f1310cc6d65c343a8e9d Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Tue, 17 Mar 2020 12:10:02 -0700 Subject: [PATCH 076/668] Version bump to v1.9.0-beta.1 --- Makefile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) 
diff --git a/Makefile b/Makefile index bd54cff886..4732bc6763 100755 --- a/Makefile +++ b/Makefile @@ -14,9 +14,9 @@ # Bump these on release - and please check ISO_VERSION for correctness. VERSION_MAJOR ?= 1 -VERSION_MINOR ?= 8 -VERSION_BUILD ?= 2 -RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).${VERSION_BUILD} +VERSION_MINOR ?= 9 +VERSION_BUILD ?= 0-beta.1 +RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD) VERSION ?= v$(RAW_VERSION) KUBERNETES_VERSION ?= $(shell egrep "DefaultKubernetesVersion =" pkg/minikube/constants/constants.go | cut -d \" -f2) @@ -25,7 +25,7 @@ PRELOADED_TARBALL_VERSION ?= $(shell egrep "PreloadVersion =" pkg/minikube/downl PRELOADED_VOLUMES_GCS_BUCKET ?= $(shell egrep "PreloadBucket =" pkg/minikube/download/preload.go | cut -d \" -f2) # Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions -ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).0 +ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD) # Dashes are valid in semver, but not Linux packaging. 
Use ~ to delimit alpha/beta DEB_VERSION ?= $(subst -,~,$(RAW_VERSION)) RPM_VERSION ?= $(DEB_VERSION) From 14b03f1f75425502f0d965123f1c270be55d0ee8 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Tue, 17 Mar 2020 12:13:16 -0700 Subject: [PATCH 077/668] Update to Kubernetes v1.18.0-rc.1 by default --- pkg/minikube/bootstrapper/bsutil/kubelet_test.go | 8 ++++---- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 2 +- pkg/minikube/constants/constants.go | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go index 3019ee1f52..3902af1198 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go @@ -79,7 +79,7 @@ Wants=crio.service [Service] ExecStart= -ExecStart=/var/lib/minikube/binaries/v1.18.0-beta.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m +ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 
--pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m [Install] `, @@ -104,7 +104,7 @@ Wants=containerd.service [Service] ExecStart= -ExecStart=/var/lib/minikube/binaries/v1.17.3/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m +ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m [Install] `, @@ -136,7 +136,7 @@ Wants=containerd.service [Service] ExecStart= -ExecStart=/var/lib/minikube/binaries/v1.17.3/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube 
--image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m +ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m [Install] `, @@ -162,7 +162,7 @@ Wants=docker.socket [Service] ExecStart= -ExecStart=/var/lib/minikube/binaries/v1.17.3/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-infra-container-image=docker-proxy-image.io/google_containers/pause:3.1 --pod-manifest-path=/etc/kubernetes/manifests +ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-infra-container-image=docker-proxy-image.io/google_containers/pause:3.2 
--pod-manifest-path=/etc/kubernetes/manifests [Install] `, diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 92f896e4ef..8188b5af5a 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -517,7 +517,7 @@ func (k *Bootstrapper) applyNodeLabels(cfg config.ClusterConfig) error { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() // example: - // sudo /var/lib/minikube/binaries/v1.17.3/kubectl label nodes minikube.k8s.io/version=v1.7.3 minikube.k8s.io/commit=aa91f39ffbcf27dcbb93c4ff3f457c54e585cf4a-dirty minikube.k8s.io/name=p1 minikube.k8s.io/updated_at=2020_02_20T12_05_35_0700 --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig + // sudo /var/lib/minikube/binaries//kubectl label nodes minikube.k8s.io/version= minikube.k8s.io/commit=aa91f39ffbcf27dcbb93c4ff3f457c54e585cf4a-dirty minikube.k8s.io/name=p1 minikube.k8s.io/updated_at=2020_02_20T12_05_35_0700 --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig cmd := exec.CommandContext(ctx, "sudo", path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl"), "label", "nodes", verLbl, commitLbl, nameLbl, createdAtLbl, "--all", "--overwrite", diff --git a/pkg/minikube/constants/constants.go b/pkg/minikube/constants/constants.go index 5caeeb7505..ecaa8cdb98 100644 --- a/pkg/minikube/constants/constants.go +++ b/pkg/minikube/constants/constants.go @@ -26,9 +26,9 @@ import ( const ( // DefaultKubernetesVersion is the default kubernetes version - DefaultKubernetesVersion = "v1.17.3" + DefaultKubernetesVersion = "v1.18.0-rc.1" // NewestKubernetesVersion is the newest Kubernetes version to test against - NewestKubernetesVersion = "v1.18.0-beta.2" + NewestKubernetesVersion = "v1.18.0-rc.1" // OldestKubernetesVersion is the oldest Kubernetes version to test against OldestKubernetesVersion = "v1.11.10" // DefaultClusterName is 
the default nane for the k8s cluster From 66a6f4e9060b85d3e4f6ec92c8f33cb32472a70c Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 17 Mar 2020 12:28:44 -0700 Subject: [PATCH 078/668] fix docker status --- cmd/minikube/cmd/node_add.go | 5 +++++ pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 3 ++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index a450684ec3..1e28103142 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -23,6 +23,7 @@ import ( "github.com/spf13/pflag" "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" @@ -43,6 +44,10 @@ var nodeAddCmd = &cobra.Command{ exit.WithError("Error getting config", err) } + if driver.BareMetal(cc.Driver) { + out.ErrT(out.FailureType, "none driver does not support multi-node clusters") + } + name := fmt.Sprintf("m%02d", len(cc.Nodes)+1) out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile}) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 6bd2403b71..0863688ea8 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -276,10 +276,11 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time port := n.Port if driver.IsKIC(cfg.Driver) { ip = oci.DefaultBindIPV4 - port, err := oci.ForwardedPort(cfg.Driver, driver.MachineName(cfg, n), port) + p, err := oci.ForwardedPort(cfg.Driver, driver.MachineName(cfg, n), port) if err != nil { return errors.Wrapf(err, "get host-bind port %d for container %s", port, cfg.Name) } + port = p } if n.ControlPlane { if err := kverify.APIServerIsRunning(start, ip, port, timeout); err != nil { From c38cc9debef88afe1c0504b699dd439c4ed7fe66 Mon 
Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Tue, 17 Mar 2020 13:13:07 -0700 Subject: [PATCH 079/668] Add cloudbuild setup for preload generator image --- Makefile | 4 +++ deploy/preload/Dockerfile | 47 ++++++++++++++++++++++++++++++++++ deploy/preload/cloudbuild.yaml | 2 ++ 3 files changed, 53 insertions(+) create mode 100644 deploy/preload/Dockerfile create mode 100644 deploy/preload/cloudbuild.yaml diff --git a/Makefile b/Makefile index 8d46355ada..ad1deba4f6 100755 --- a/Makefile +++ b/Makefile @@ -523,6 +523,10 @@ kic-base-image: ## builds the base image used for kic. docker rmi -f $(REGISTRY)/kicbase:$(KIC_VERSION)-snapshot || true docker build -f ./hack/images/kicbase.Dockerfile -t $(REGISTRY)/kicbase:$(KIC_VERSION)-snapshot --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) --target base . +.PHONY: preload-generator-image +preload-generator-image: + docker build -t gcr.io/k8s-minikube/preload-generator -f deploy/preload/Dockerfile deploy/preload + .PHONY: upload-preloaded-images-tar upload-preloaded-images-tar: out/minikube # Upload the preloaded images for oldest supported, newest supported, and default kubernetes versions to GCS. go run ./hack/preload-images/*.go diff --git a/deploy/preload/Dockerfile b/deploy/preload/Dockerfile new file mode 100644 index 0000000000..5159051106 --- /dev/null +++ b/deploy/preload/Dockerfile @@ -0,0 +1,47 @@ +# Copyright 2020 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Download gcloud +FROM alpine:3.10 as download-gcloud +ENV GCLOUD_VERSION 276.0.0 +ENV GCLOUD_URL https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-${GCLOUD_VERSION}-linux-x86_64.tar.gz +RUN wget -O gcloud.tar.gz "${GCLOUD_URL}" +RUN tar -zxf gcloud.tar.gz + +FROM gcr.io/gcp-runtimes/ubuntu_16_0_4 + +# Install python and make +RUN apt-get update && \ + apt-get install --no-install-recommends --no-install-suggests -y \ + python make && \ + rm -rf /var/lib/apt/lists/* + +# Install docker +COPY --from=docker:18.09.6 /usr/local/bin/docker /usr/local/bin/ + +# Install gcloud dependencies +COPY --from=download-gcloud google-cloud-sdk/ /google-cloud-sdk/ +# Finish installation of gcloud +RUN CLOUDSDK_PYTHON="python2.7" /google-cloud-sdk/install.sh \ + --usage-reporting=false \ + --bash-completion=false \ + --disable-installation-options +ENV PATH=$PATH:/google-cloud-sdk/bin +RUN gcloud auth configure-docker + +# Install go +COPY --from=golang:1.14 /usr/local/go /usr/local/go +ENV PATH=$PATH:/usr/local/go/bin + +CMD make upload-preloaded-images-tar diff --git a/deploy/preload/cloudbuild.yaml b/deploy/preload/cloudbuild.yaml new file mode 100644 index 0000000000..7b53696447 --- /dev/null +++ b/deploy/preload/cloudbuild.yaml @@ -0,0 +1,2 @@ +steps: + - image: gcr.io/k8s-minikube/preload-generator From 2778bd1c99b8550d538a3830cceb70a315e84385 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Tue, 17 Mar 2020 13:14:38 -0700 Subject: [PATCH 080/668] Build preload generator image in cloudbuild --- deploy/preload/cloudbuild.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/deploy/preload/cloudbuild.yaml b/deploy/preload/cloudbuild.yaml index 7b53696447..750e8524d7 100644 --- a/deploy/preload/cloudbuild.yaml +++ b/deploy/preload/cloudbuild.yaml @@ -1,2 +1,4 @@ steps: - - image: gcr.io/k8s-minikube/preload-generator + - image: gcr.io/cloud-builders/docker + args: ["build", "-t", "preload-generator", "-f", 
"deploy/preload/Dockerfile", "deploy/preload"] + - image: preload-generator From 5659a1df6ea47ef37650d6fc153883b689d31ec2 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Tue, 17 Mar 2020 13:52:38 -0700 Subject: [PATCH 081/668] Remove cloudbuild stuff, run preload generation in jenkins --- Makefile | 4 -- deploy/preload/Dockerfile | 47 ------------------- deploy/preload/cloudbuild.yaml | 4 -- .../minikube_cross_build_and_upload.sh | 2 +- hack/jenkins/preload_generation.sh | 28 +++++++++++ 5 files changed, 29 insertions(+), 56 deletions(-) delete mode 100644 deploy/preload/Dockerfile delete mode 100644 deploy/preload/cloudbuild.yaml create mode 100644 hack/jenkins/preload_generation.sh diff --git a/Makefile b/Makefile index ad1deba4f6..8d46355ada 100755 --- a/Makefile +++ b/Makefile @@ -523,10 +523,6 @@ kic-base-image: ## builds the base image used for kic. docker rmi -f $(REGISTRY)/kicbase:$(KIC_VERSION)-snapshot || true docker build -f ./hack/images/kicbase.Dockerfile -t $(REGISTRY)/kicbase:$(KIC_VERSION)-snapshot --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) --target base . -.PHONY: preload-generator-image -preload-generator-image: - docker build -t gcr.io/k8s-minikube/preload-generator -f deploy/preload/Dockerfile deploy/preload - .PHONY: upload-preloaded-images-tar upload-preloaded-images-tar: out/minikube # Upload the preloaded images for oldest supported, newest supported, and default kubernetes versions to GCS. go run ./hack/preload-images/*.go diff --git a/deploy/preload/Dockerfile b/deploy/preload/Dockerfile deleted file mode 100644 index 5159051106..0000000000 --- a/deploy/preload/Dockerfile +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2020 The Kubernetes Authors All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Download gcloud -FROM alpine:3.10 as download-gcloud -ENV GCLOUD_VERSION 276.0.0 -ENV GCLOUD_URL https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-${GCLOUD_VERSION}-linux-x86_64.tar.gz -RUN wget -O gcloud.tar.gz "${GCLOUD_URL}" -RUN tar -zxf gcloud.tar.gz - -FROM gcr.io/gcp-runtimes/ubuntu_16_0_4 - -# Install python and make -RUN apt-get update && \ - apt-get install --no-install-recommends --no-install-suggests -y \ - python make && \ - rm -rf /var/lib/apt/lists/* - -# Install docker -COPY --from=docker:18.09.6 /usr/local/bin/docker /usr/local/bin/ - -# Install gcloud dependencies -COPY --from=download-gcloud google-cloud-sdk/ /google-cloud-sdk/ -# Finish installation of gcloud -RUN CLOUDSDK_PYTHON="python2.7" /google-cloud-sdk/install.sh \ - --usage-reporting=false \ - --bash-completion=false \ - --disable-installation-options -ENV PATH=$PATH:/google-cloud-sdk/bin -RUN gcloud auth configure-docker - -# Install go -COPY --from=golang:1.14 /usr/local/go /usr/local/go -ENV PATH=$PATH:/usr/local/go/bin - -CMD make upload-preloaded-images-tar diff --git a/deploy/preload/cloudbuild.yaml b/deploy/preload/cloudbuild.yaml deleted file mode 100644 index 750e8524d7..0000000000 --- a/deploy/preload/cloudbuild.yaml +++ /dev/null @@ -1,4 +0,0 @@ -steps: - - image: gcr.io/cloud-builders/docker - args: ["build", "-t", "preload-generator", "-f", "deploy/preload/Dockerfile", "deploy/preload"] - - image: preload-generator diff --git a/hack/jenkins/minikube_cross_build_and_upload.sh b/hack/jenkins/minikube_cross_build_and_upload.sh index 
ff9dc97092..382adacea7 100755 --- a/hack/jenkins/minikube_cross_build_and_upload.sh +++ b/hack/jenkins/minikube_cross_build_and_upload.sh @@ -39,7 +39,7 @@ declare -rx TAG="${ghprbActualCommit}" docker kill $(docker ps -q) || true docker rm $(docker ps -aq) || true -make -j 16 all && failed=$? || failed=$? +make -j 16 all && failed=$? || failed=$? "out/minikube-$(go env GOOS)-$(go env GOARCH)" version diff --git a/hack/jenkins/preload_generation.sh b/hack/jenkins/preload_generation.sh new file mode 100644 index 0000000000..66e2ba1125 --- /dev/null +++ b/hack/jenkins/preload_generation.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# This script builds all the minikube binary for all 3 platforms as well as Windows-installer and .deb +# This is intended to be run on a new release tag in order to build/upload the required files for a release + + +set -eux -o pipefail + +# Make sure the right golang version is installed based on Makefile +WANT_GOLANG_VERSION=$(grep '^GO_VERSION' Makefile | awk '{ print $3 }') +./hack/jenkins/installers/check_install_golang.sh $WANT_GOLANG_VERSION /usr/local + +make upload-preloaded-images-tar +make clean From 579ac4411cb26f0eb9089408e33732ffe10d46d5 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Tue, 17 Mar 2020 13:58:03 -0700 Subject: [PATCH 082/668] Use one tarballName function for consistency and add amd64 to the name --- hack/preload-images/preload_images.go | 6 +----- pkg/minikube/download/preload.go | 16 ++++++++-------- 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/hack/preload-images/preload_images.go b/hack/preload-images/preload_images.go index 7a9956d9c0..ceb0e5021b 100644 --- a/hack/preload-images/preload_images.go +++ b/hack/preload-images/preload_images.go @@ -62,7 +62,7 @@ func main() { for _, kv := range k8sVersions { for _, cr := range containerRuntimes { - tf := tarballFilename(kv, cr) + tf := download.TarballName(kv) if tarballExists(tf) { fmt.Printf("A preloaded tarball for k8s version %s already exists, skipping generation.\n", kv) continue @@ -78,10 +78,6 @@ func main() { } } -func tarballFilename(kubernetesVersion string, containerRuntime string) string { - return fmt.Sprintf("preloaded-images-k8s-%s-%s-%s-%s.tar.lz4", preloadedTarballVersion, kubernetesVersion, containerRuntime, dockerStorageDriver) -} - func tarballExists(tarballFilename string) bool { fmt.Println("Checking if tarball already exists...") gcsPath := fmt.Sprintf("gs://%s/%s", download.PreloadBucket, tarballFilename) diff --git a/pkg/minikube/download/preload.go b/pkg/minikube/download/preload.go index 62893edf1b..08f7e307c4 100644 --- 
a/pkg/minikube/download/preload.go +++ b/pkg/minikube/download/preload.go @@ -42,14 +42,14 @@ const ( PreloadBucket = "minikube-preloaded-volume-tarballs" ) -// returns name of the tarball -func tarballName(k8sVersion string) string { - return fmt.Sprintf("preloaded-images-k8s-%s-%s-docker-overlay2.tar.lz4", PreloadVersion, k8sVersion) +// TarballName returns name of the tarball +func TarballName(k8sVersion string) string { + return fmt.Sprintf("preloaded-images-k8s-%s-%s-docker-overlay2-amd64.tar.lz4", PreloadVersion, k8sVersion) } // returns the name of the checksum file func checksumName(k8sVersion string) string { - return fmt.Sprintf("%s.checksum", tarballName(k8sVersion)) + return fmt.Sprintf("%s.checksum", TarballName(k8sVersion)) } // returns target dir for all cached items related to preloading @@ -64,12 +64,12 @@ func PreloadChecksumPath(k8sVersion string) string { // TarballPath returns the path to the preloaded tarball func TarballPath(k8sVersion string) string { - return path.Join(targetDir(), tarballName(k8sVersion)) + return path.Join(targetDir(), TarballName(k8sVersion)) } // remoteTarballURL returns the URL for the remote tarball in GCS func remoteTarballURL(k8sVersion string) string { - return fmt.Sprintf("https://storage.googleapis.com/%s/%s", PreloadBucket, tarballName(k8sVersion)) + return fmt.Sprintf("https://storage.googleapis.com/%s/%s", PreloadBucket, TarballName(k8sVersion)) } // PreloadExists returns true if there is a preloaded tarball that can be used @@ -147,13 +147,13 @@ func Preload(k8sVersion, containerRuntime string) error { } func saveChecksumFile(k8sVersion string) error { - glog.Infof("saving checksum for %s ...", tarballName(k8sVersion)) + glog.Infof("saving checksum for %s ...", TarballName(k8sVersion)) ctx := context.Background() client, err := storage.NewClient(ctx, option.WithoutAuthentication()) if err != nil { return errors.Wrap(err, "getting storage client") } - attrs, err := 
client.Bucket(PreloadBucket).Object(tarballName(k8sVersion)).Attrs(ctx) + attrs, err := client.Bucket(PreloadBucket).Object(TarballName(k8sVersion)).Attrs(ctx) if err != nil { return errors.Wrap(err, "getting storage object") } From d5490a8fb80f67c821fec1c8d426f8d64cd77160 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Tue, 17 Mar 2020 16:03:29 -0700 Subject: [PATCH 083/668] Move driver validation before driver selection If a user passes in a driver via --vm-driver or --driver, then we should make sure it matches the preexisting cluster (if one exists) before we try and get information about the driver. This avoids confusing logs where we tell the user we're using the preexisting driver but then error out because they specified a different driver. --- cmd/minikube/cmd/start.go | 90 ++++++++++++++++++++++----------------- 1 file changed, 50 insertions(+), 40 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 44e3d3210c..13ea4c891f 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -294,6 +294,7 @@ func runStart(cmd *cobra.Command, args []string) { exit.WithCodeT(exit.Data, "Unable to load config: {{.error}}", out.V{"error": err}) } + validateSpecifiedDriver(existing) ds := selectDriver(existing) driverName := ds.Name glog.Infof("selected driver: %s", driverName) @@ -482,6 +483,55 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState { return pick } +// validateSpecifiedDriver makes sure that if a user has passed in a driver +// it matches the existing cluster if there is one +func validateSpecifiedDriver(existing *config.ClusterConfig) { + if existing == nil { + return + } + old := existing.Driver + var requested string + if d := viper.GetString("driver"); d != "" { + requested = d + } else if d := viper.GetString("vm-driver"); d != "" { + requested = d + } + if old == requested { + return + } + + api, err := machine.NewAPIClient() + if err != nil { + glog.Warningf("selectDriver 
NewAPIClient: %v", err) + return + } + + cp, err := config.PrimaryControlPlane(existing) + if err != nil { + exit.WithError("Error getting primary cp", err) + } + machineName := driver.MachineName(*existing, cp) + h, err := api.Load(machineName) + if err != nil { + glog.Warningf("selectDriver api.Load: %v", err) + return + } + + out.ErrT(out.Conflict, `The existing "{{.profile_name}}" VM was created using the "{{.old_driver}}" driver, and is incompatible with the "{{.driver}}" driver.`, + out.V{"profile_name": machineName, "driver": requested, "old_driver": h.Driver.DriverName()}) + + out.ErrT(out.Workaround, `To proceed, either: + +1) Delete the existing "{{.profile_name}}" cluster using: '{{.command}} delete' + +* or * + +2) Start the existing "{{.profile_name}}" cluster using: '{{.command}} start --driver={{.old_driver}}' +`, out.V{"command": minikubeCmd(), "old_driver": h.Driver.DriverName(), "profile_name": machineName}) + + exit.WithCodeT(exit.Config, "Exiting.") +} + // validateDriver validates that the selected driver appears sane, exits if not func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) { name := ds.Name @@ -510,46 +560,6 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) { exit.WithCodeT(exit.Unavailable, "{{.driver}} does not appear to be installed", out.V{"driver": name}) } } - - if existing == nil { - return - } - - api, err := machine.NewAPIClient() - if err != nil { - glog.Warningf("selectDriver NewAPIClient: %v", err) - return - } - - cp, err := config.PrimaryControlPlane(existing) - if err != nil { - exit.WithError("Error getting primary cp", err) - } - - machineName := driver.MachineName(*existing, cp) - h, err := api.Load(machineName) - if err != nil { - glog.Warningf("selectDriver api.Load: %v", err) - return - } - - if h.Driver.DriverName() == name { - return - } - - out.ErrT(out.Conflict, `The existing "{{.profile_name}}" VM that was created using the "{{.old_driver}}" driver, and 
is incompatible with the "{{.driver}}" driver.`, - out.V{"profile_name": machineName, "driver": name, "old_driver": h.Driver.DriverName()}) - - out.ErrT(out.Workaround, `To proceed, either: - - 1) Delete the existing "{{.profile_name}}" cluster using: '{{.command}} delete' - - * or * - - 2) Start the existing "{{.profile_name}}" cluster using: '{{.command}} start --driver={{.old_driver}}' - `, out.V{"command": minikubeCmd(), "old_driver": h.Driver.DriverName(), "profile_name": machineName}) - - exit.WithCodeT(exit.Config, "Exiting.") } func selectImageRepository(mirrorCountry string, v semver.Version) (bool, string, error) { From dd707820fc85f36e4576720a7a4f132cf63b7871 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Tue, 17 Mar 2020 18:07:48 -0700 Subject: [PATCH 084/668] If memory is specified in config, suggest that for memory allocation --- cmd/minikube/cmd/start.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 44e3d3210c..9fc5195950 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -666,6 +666,9 @@ func memoryLimits(drvName string) (int, int, error) { // suggestMemoryAllocation calculates the default memory footprint in MB func suggestMemoryAllocation(sysLimit int, containerLimit int) int { + if mem := viper.GetInt(memory); mem != 0 { + return mem + } fallback := 2200 maximum := 6000 From 196eb6a8537f4d98af7cf19ae0fa592dfc2c332c Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Tue, 17 Mar 2020 18:38:32 -0700 Subject: [PATCH 085/668] Start adding doc for issue triage --- site/content/en/docs/Contributing/triage.md | 73 +++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 site/content/en/docs/Contributing/triage.md diff --git a/site/content/en/docs/Contributing/triage.md b/site/content/en/docs/Contributing/triage.md new file mode 100644 index 0000000000..7ba7d66287 --- /dev/null +++ b/site/content/en/docs/Contributing/triage.md @@ -0,0 +1,73 @@ +--- 
+linkTitle: "Triage" +title: "Triaging Minikube Issues" +date: 2020-03-17 +weight: 10 +description: > + How to triage issues in the minikube repo +--- + +Triage is an important part of maintaining the health of the minikube repo. +A well organized repo allows maintainers to prioritize feature requests, fix bugs, and respond to users facing difficulty with the tool as quickly as possible. + +Triage includes: +- Labeling issues +- Responding to issues +- Closing issues (under certain circumstances!) + +If you're interested in helping out with minikube triage, this doc covers the basics of doing so. + +Additionally, if you'd be interested in participating in our weekly triage meeting, please fill out this [form](https://forms.gle/vNtWZSWXqeYaaNbU9) to express interest. Thank you! + +# Labeling Issues + + + + + +# Responding to Issues + +Many issues in the minikube repo fall into one of the following categories: +- Needs more information from the author to be actionable +- Duplicate Issue + + +## Closing with Care + +Issues typically need to be closed for the following reasons: + +- The issue has been addressed +- The issue is a duplicate of an existing issue +- There has been a lack of information over a long period of time + +In any of these situations, we aim to be kind when closing the issue, and offer the author action items should they need to reopen their issue or still require a solution. + +Samples responses for these situations include: + +### Issue has been addressed + +@author: I believe this issue is now addressed by minikube v1.4, as it . If you still see this issue with minikube v1.4 or higher, please reopen this issue by commenting with `/reopen` + +Thank you for reporting this issue! + +### Duplicate Issue + + +This issue appears to be a duplicate of #X, do you mind if we move the conversation there? + +This way we can centralize the content relating to the issue. 
If you feel that this issue is not in fact a duplicate, please re-open it using `/reopen`. If you have additional information to share, please add it to the new issue. + +Thank you for reporting this! + +### Lack of Information + +Hey @author -- hopefully it's OK if I close this - there wasn't enough information to make it actionable, and some time has already passed. If you are able to provide additional details, you may reopen it at any point by adding /reopen to your comment. + +Here is additional information that may be helpful to us: + +* Whether the issue occurs with the latest minikube release +* The exact `minikube start` command line used +* The full output of the `minikube start` command, preferably with `--alsologtostderr -v=3` for extra logging. + * The full output of `minikube logs` + +Thank you for sharing your experience! From e95e154fdd6d1b93da983ccba8999258fbec2d8e Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 18 Mar 2020 09:47:15 -0700 Subject: [PATCH 086/668] Add release notes for v1.9.0-beta.1 --- CHANGELOG.md | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a6d0871151..68ff00fca6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,35 @@ # Release Notes +## Version 1.9.0-beta.1 - 2020-03-18 + +New features + +* Use Kubernetes v1.18.0-rc.1 by default [#7076](https://github.com/kubernetes/minikube/pull/7076) +* Upgrade Docker, from 19.03.7 to 19.03.8 [#7040](https://github.com/kubernetes/minikube/pull/7040) +* Upgrade Docker, from 19.03.6 to 19.03.7 [#6939](https://github.com/kubernetes/minikube/pull/6939) +* Updated French translation [#7055](https://github.com/kubernetes/minikube/pull/7055) + +Bugfixes + +* Strip the version prefix before calling semver [#7054](https://github.com/kubernetes/minikube/pull/7054) +* Move some of the driver validation before driver selection [#7080](https://github.com/kubernetes/minikube/pull/7080) +* Fix bug where global config 
memory was ignored [#7082](https://github.com/kubernetes/minikube/pull/7082) +* Remove controllerManager from the kubeadm v1beta2 template [#7030](https://github.com/kubernetes/minikube/pull/7030) +* Delete: output underlying status failure [#7043](https://github.com/kubernetes/minikube/pull/7043) +* status: error properly if cluster does not exist [#7041](https://github.com/kubernetes/minikube/pull/7041) + +Huge thank you for this release towards our contributors: + +- Anders F Björklund +- Medya Ghazizadeh +- Priya Wadhwa +- Richard Wall +- Sharif Elgamal +- Thomas Strömberg +- Vikky Omkar +- jumahmohammad +- vikkyomkar + ## Version 1.8.2 - 2020-03-13 Shiny new improvements: From bed70069702e8c122f4f3306298a861e1ce9f201 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 18 Mar 2020 10:07:49 -0700 Subject: [PATCH 087/668] Upgrade Docker driver to preferred (Linux), default on other platforms --- pkg/minikube/registry/drvs/docker/docker.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/pkg/minikube/registry/drvs/docker/docker.go b/pkg/minikube/registry/drvs/docker/docker.go index 8549d35706..a9191a35d9 100644 --- a/pkg/minikube/registry/drvs/docker/docker.go +++ b/pkg/minikube/registry/drvs/docker/docker.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "os/exec" + "runtime" "time" "github.com/docker/machine/libmachine/drivers" @@ -32,12 +33,21 @@ import ( ) func init() { + priority := registry.Default + // Staged rollout for preferred: + // - Linux + // - Windows (once "service" command works) + // - macOS + if runtime.GOOS == "linux" { + priority = registry.Preferred + } + if err := registry.Register(registry.DriverDef{ Name: driver.Docker, Config: configure, Init: func() drivers.Driver { return kic.NewDriver(kic.Config{OCIBinary: oci.Docker}) }, Status: status, - Priority: registry.Fallback, + Priority: priority, }); err != nil { panic(fmt.Sprintf("register failed: %v", err)) } From 68baafccf0eade8940a0e4559b2f85c21dacd01e Mon 
Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Wed, 18 Mar 2020 10:41:24 -0700 Subject: [PATCH 088/668] fix lint --- hack/preload-images/preload_images.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/hack/preload-images/preload_images.go b/hack/preload-images/preload_images.go index ceb0e5021b..60b6bc9e2a 100644 --- a/hack/preload-images/preload_images.go +++ b/hack/preload-images/preload_images.go @@ -33,11 +33,10 @@ const ( ) var ( - dockerStorageDriver = "overlay2" - preloadedTarballVersion = "v1" - containerRuntimes = []string{"docker"} - k8sVersion string - k8sVersions []string + dockerStorageDriver = "overlay2" + containerRuntimes = []string{"docker"} + k8sVersion string + k8sVersions []string ) func init() { From 17a30cdd7a5dd36a5be020ec5349c97c00fbaf67 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Wed, 18 Mar 2020 11:03:30 -0700 Subject: [PATCH 089/668] Change download test to use unsupported preload version So that we can still make sure that images are being pulled properly. Also, remove the check for the kic base image. Right now, it isn't working, and I'm working on a fix in another PR. 
--- test/integration/aaa_download_only_test.go | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/test/integration/aaa_download_only_test.go b/test/integration/aaa_download_only_test.go index e4ca5ebb26..057b636c9c 100644 --- a/test/integration/aaa_download_only_test.go +++ b/test/integration/aaa_download_only_test.go @@ -54,8 +54,7 @@ func TestDownloadOnly(t *testing.T) { t.Run("group", func(t *testing.T) { versions := []string{ constants.OldestKubernetesVersion, - constants.DefaultKubernetesVersion, - constants.NewestKubernetesVersion, + "v1.12.0", } for _, v := range versions { t.Run(v, func(t *testing.T) { @@ -197,15 +196,6 @@ func TestDownloadOnlyDocker(t *testing.T) { if string(remoteChecksum) != string(checksum[:]) { t.Errorf("checksum of %s does not match remote checksum (%s != %s)", tarball, string(remoteChecksum), string(checksum[:])) } - - // Make sure this image exists in the docker daemon - ref, err := name.ParseReference(kic.BaseImage) - if err != nil { - t.Errorf("parsing reference failed: %v", err) - } - if _, err := daemon.Image(ref); err != nil { - t.Errorf("expected image does not exist in local daemon: %v", err) - } } func runningDockerDriver(startArgs []string) bool { From 2a38c7395ee9037c7e74012a819e76fe87a89f24 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Wed, 18 Mar 2020 11:49:09 -0700 Subject: [PATCH 090/668] fix lint --- go.mod | 1 - go.sum | 2 -- test/integration/aaa_download_only_test.go | 3 --- 3 files changed, 6 deletions(-) diff --git a/go.mod b/go.mod index b1861de795..a703f57861 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,6 @@ require ( github.com/google/go-cmp v0.3.1 github.com/google/go-containerregistry v0.0.0-20200131185320-aec8da010de2 github.com/google/go-github v17.0.0+incompatible - github.com/google/go-github/v29 v29.0.3 // indirect github.com/googleapis/gnostic v0.3.0 // indirect github.com/hashicorp/go-getter v1.4.0 github.com/hashicorp/go-retryablehttp v0.5.4 diff --git a/go.sum 
b/go.sum index dfeb89780e..d84ef83e6e 100644 --- a/go.sum +++ b/go.sum @@ -338,8 +338,6 @@ github.com/google/go-containerregistry v0.0.0-20200131185320-aec8da010de2 h1:/z0 github.com/google/go-containerregistry v0.0.0-20200131185320-aec8da010de2/go.mod h1:Wtl/v6YdQxv397EREtzwgd9+Ud7Q5D8XMbi3Zazgkrs= github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-github/v29 v29.0.3 h1:IktKCTwU//aFHnpA+2SLIi7Oo9uhAzgsdZNbcAqhgdc= -github.com/google/go-github/v29 v29.0.3/go.mod h1:CHKiKKPHJ0REzfwc14QMklvtHwCveD0PxlMjLlzAM5E= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= diff --git a/test/integration/aaa_download_only_test.go b/test/integration/aaa_download_only_test.go index 057b636c9c..2c2210b590 100644 --- a/test/integration/aaa_download_only_test.go +++ b/test/integration/aaa_download_only_test.go @@ -32,9 +32,6 @@ import ( "testing" "time" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/daemon" - "k8s.io/minikube/pkg/drivers/kic" "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" From ff8ae7311cd8c3e71c303b0d45386b9a5aa73b0c Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Wed, 18 Mar 2020 12:27:17 -0700 Subject: [PATCH 091/668] If user doesn't specify driver, don't validate against existing cluster --- cmd/minikube/cmd/start.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index be41c2b560..6227dfa16a 100644 --- a/cmd/minikube/cmd/start.go +++ 
b/cmd/minikube/cmd/start.go @@ -496,6 +496,10 @@ func validateSpecifiedDriver(existing *config.ClusterConfig) { } else if d := viper.GetString("vm-driver"); d != "" { requested = d } + // Neither --vm-driver or --driver was specified + if requested == "" { + return + } if old == requested { return } From 53da7250303b84113c8e870b07a06b030fd58249 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 18 Mar 2020 12:27:41 -0700 Subject: [PATCH 092/668] Dashboard upgrade to rc6 --- deploy/addons/dashboard/dashboard-dp.yaml | 2 +- pkg/minikube/bootstrapper/images/images.go | 2 +- pkg/minikube/bootstrapper/images/images_test.go | 4 ++-- pkg/minikube/bootstrapper/images/kubeadm_test.go | 14 +++++++------- pkg/minikube/download/preload.go | 4 +++- 5 files changed, 14 insertions(+), 12 deletions(-) diff --git a/deploy/addons/dashboard/dashboard-dp.yaml b/deploy/addons/dashboard/dashboard-dp.yaml index 98ac68e359..8af7003de4 100644 --- a/deploy/addons/dashboard/dashboard-dp.yaml +++ b/deploy/addons/dashboard/dashboard-dp.yaml @@ -90,7 +90,7 @@ spec: containers: - name: kubernetes-dashboard # WARNING: This must match pkg/minikube/bootstrapper/images/images.go - image: kubernetesui/dashboard:v2.0.0-beta8 + image: kubernetesui/dashboard:v2.0.0-rc6 ports: - containerPort: 9090 protocol: TCP diff --git a/pkg/minikube/bootstrapper/images/images.go b/pkg/minikube/bootstrapper/images/images.go index 1029e94007..4b619359b9 100644 --- a/pkg/minikube/bootstrapper/images/images.go +++ b/pkg/minikube/bootstrapper/images/images.go @@ -135,7 +135,7 @@ func dashboardFrontend(repo string) string { repo = "kubernetesui" } // See 'kubernetes-dashboard' in deploy/addons/dashboard/dashboard-dp.yaml - return path.Join(repo, "dashboard:v2.0.0-beta8") + return path.Join(repo, "dashboard:v2.0.0-rc6") } // dashboardMetrics returns the image used for the dashboard metrics scraper diff --git a/pkg/minikube/bootstrapper/images/images_test.go b/pkg/minikube/bootstrapper/images/images_test.go index 
6a2edc5da2..e623a9002b 100644 --- a/pkg/minikube/bootstrapper/images/images_test.go +++ b/pkg/minikube/bootstrapper/images/images_test.go @@ -25,7 +25,7 @@ import ( func TestAuxiliary(t *testing.T) { want := []string{ "gcr.io/k8s-minikube/storage-provisioner:v1.8.1", - "kubernetesui/dashboard:v2.0.0-beta8", + "kubernetesui/dashboard:v2.0.0-rc6", "kubernetesui/metrics-scraper:v1.0.2", } got := auxiliary("") @@ -37,7 +37,7 @@ func TestAuxiliary(t *testing.T) { func TestAuxiliaryMirror(t *testing.T) { want := []string{ "test.mirror/storage-provisioner:v1.8.1", - "test.mirror/dashboard:v2.0.0-beta8", + "test.mirror/dashboard:v2.0.0-rc6", "test.mirror/metrics-scraper:v1.0.2", } got := auxiliary("test.mirror") diff --git a/pkg/minikube/bootstrapper/images/kubeadm_test.go b/pkg/minikube/bootstrapper/images/kubeadm_test.go index 1819254f23..d705b5e74c 100644 --- a/pkg/minikube/bootstrapper/images/kubeadm_test.go +++ b/pkg/minikube/bootstrapper/images/kubeadm_test.go @@ -38,7 +38,7 @@ func TestKubeadmImages(t *testing.T) { "k8s.gcr.io/etcd:3.4.3-0", "k8s.gcr.io/pause:3.1", "gcr.io/k8s-minikube/storage-provisioner:v1.8.1", - "kubernetesui/dashboard:v2.0.0-beta8", + "kubernetesui/dashboard:v2.0.0-rc6", "kubernetesui/metrics-scraper:v1.0.2", }}, {"v1.16.1", "mirror.k8s.io", []string{ @@ -50,7 +50,7 @@ func TestKubeadmImages(t *testing.T) { "mirror.k8s.io/etcd:3.3.15-0", "mirror.k8s.io/pause:3.1", "mirror.k8s.io/storage-provisioner:v1.8.1", - "mirror.k8s.io/dashboard:v2.0.0-beta8", + "mirror.k8s.io/dashboard:v2.0.0-rc6", "mirror.k8s.io/metrics-scraper:v1.0.2", }}, {"v1.15.0", "", []string{ @@ -62,7 +62,7 @@ func TestKubeadmImages(t *testing.T) { "k8s.gcr.io/etcd:3.3.10", "k8s.gcr.io/pause:3.1", "gcr.io/k8s-minikube/storage-provisioner:v1.8.1", - "kubernetesui/dashboard:v2.0.0-beta8", + "kubernetesui/dashboard:v2.0.0-rc6", "kubernetesui/metrics-scraper:v1.0.2", }}, {"v1.14.0", "", []string{ @@ -74,7 +74,7 @@ func TestKubeadmImages(t *testing.T) { "k8s.gcr.io/etcd:3.3.10", 
"k8s.gcr.io/pause:3.1", "gcr.io/k8s-minikube/storage-provisioner:v1.8.1", - "kubernetesui/dashboard:v2.0.0-beta8", + "kubernetesui/dashboard:v2.0.0-rc6", "kubernetesui/metrics-scraper:v1.0.2", }}, {"v1.13.0", "", []string{ @@ -86,7 +86,7 @@ func TestKubeadmImages(t *testing.T) { "k8s.gcr.io/etcd:3.2.24", "k8s.gcr.io/pause:3.1", "gcr.io/k8s-minikube/storage-provisioner:v1.8.1", - "kubernetesui/dashboard:v2.0.0-beta8", + "kubernetesui/dashboard:v2.0.0-rc6", "kubernetesui/metrics-scraper:v1.0.2", }}, {"v1.12.0", "", []string{ @@ -98,7 +98,7 @@ func TestKubeadmImages(t *testing.T) { "k8s.gcr.io/etcd:3.2.24", "k8s.gcr.io/pause:3.1", "gcr.io/k8s-minikube/storage-provisioner:v1.8.1", - "kubernetesui/dashboard:v2.0.0-beta8", + "kubernetesui/dashboard:v2.0.0-rc6", "kubernetesui/metrics-scraper:v1.0.2", }}, {"v1.11.10", "", []string{ @@ -110,7 +110,7 @@ func TestKubeadmImages(t *testing.T) { "k8s.gcr.io/etcd-amd64:3.2.18", "k8s.gcr.io/pause:3.1", "gcr.io/k8s-minikube/storage-provisioner:v1.8.1", - "kubernetesui/dashboard:v2.0.0-beta8", + "kubernetesui/dashboard:v2.0.0-rc6", "kubernetesui/metrics-scraper:v1.0.2", }}, } diff --git a/pkg/minikube/download/preload.go b/pkg/minikube/download/preload.go index 62893edf1b..d97845065e 100644 --- a/pkg/minikube/download/preload.go +++ b/pkg/minikube/download/preload.go @@ -37,7 +37,9 @@ import ( const ( // PreloadVersion is the current version of the preloaded tarball - PreloadVersion = "v1" + // + // NOTE: You may need to bump this version up when upgrading auxiliary docker images + PreloadVersion = "v2" // PreloadBucket is the name of the GCS bucket where preloaded volume tarballs exist PreloadBucket = "minikube-preloaded-volume-tarballs" ) From 426b77680901c1b5fb04bd8e4068b987b53b0591 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20F=20Bj=C3=B6rklund?= Date: Wed, 18 Mar 2020 20:48:31 +0100 Subject: [PATCH 093/668] Upgrade crio to 1.17.1 --- deploy/iso/minikube-iso/package/crio-bin/crio-bin.hash | 1 + 
deploy/iso/minikube-iso/package/crio-bin/crio-bin.mk | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/deploy/iso/minikube-iso/package/crio-bin/crio-bin.hash b/deploy/iso/minikube-iso/package/crio-bin/crio-bin.hash index 28f2852b75..d0f12d3d77 100644 --- a/deploy/iso/minikube-iso/package/crio-bin/crio-bin.hash +++ b/deploy/iso/minikube-iso/package/crio-bin/crio-bin.hash @@ -11,3 +11,4 @@ sha256 70d4c746fe207422c78420dc4239768f485eea639a38c993c02872ec6305dd1d v1.15.2. sha256 05f9614c4d5970b4662499b84c270b0ab953596ee863dcd09c9dc7a2d2f09789 v1.16.0.tar.gz sha256 57e1ee990ef2d5af8b32c33a21b4998682608e3556dcf1d3349666f55e7d95b9 v1.16.1.tar.gz sha256 23a797762e4544ee7c171ef138cfc1141a3f0acc2838d9965c2a58e53b16c3ae v1.17.0.tar.gz +sha256 7967e9218fdfb59d6005a9e19c1668469bc5566c2a35927cffe7de8656bb22c7 v1.17.1.tar.gz diff --git a/deploy/iso/minikube-iso/package/crio-bin/crio-bin.mk b/deploy/iso/minikube-iso/package/crio-bin/crio-bin.mk index 0ddf03aeba..f1a58a31b7 100644 --- a/deploy/iso/minikube-iso/package/crio-bin/crio-bin.mk +++ b/deploy/iso/minikube-iso/package/crio-bin/crio-bin.mk @@ -4,8 +4,8 @@ # ################################################################################ -CRIO_BIN_VERSION = v1.17.0 -CRIO_BIN_COMMIT = 6d0ffae63b9b7d8f07e7f9cf50736a67fb31faf3 +CRIO_BIN_VERSION = v1.17.1 +CRIO_BIN_COMMIT = ee2de87bd8e2a7a84799476cb4fc4ce8a78fdf6d CRIO_BIN_SITE = https://github.com/cri-o/cri-o/archive CRIO_BIN_SOURCE = $(CRIO_BIN_VERSION).tar.gz CRIO_BIN_DEPENDENCIES = host-go libgpgme From f71b44689e341f92ee0f8a6772ef483f38b1b06f Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 18 Mar 2020 13:20:07 -0700 Subject: [PATCH 094/668] Add last minute v1.8.0-beta.1 additions to changelog --- CHANGELOG.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 68ff00fca6..835ccf9552 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,12 +5,16 @@ New features * Use Kubernetes v1.18.0-rc.1 by 
default [#7076](https://github.com/kubernetes/minikube/pull/7076) +* Upgrade Docker driver to preferred (Linux), default on other platforms [#7090](https://github.com/kubernetes/minikube/pull/7090) * Upgrade Docker, from 19.03.7 to 19.03.8 [#7040](https://github.com/kubernetes/minikube/pull/7040) * Upgrade Docker, from 19.03.6 to 19.03.7 [#6939](https://github.com/kubernetes/minikube/pull/6939) +* Upgrade dashboard to v2.0.0-rc6 [#7098](https://github.com/kubernetes/minikube/pull/7098) +* Upgrade crio to 1.17.1 [#7099](https://github.com/kubernetes/minikube/pull/7099) * Updated French translation [#7055](https://github.com/kubernetes/minikube/pull/7055) Bugfixes +* If user doesn't specify driver, don't validate against existing cluster [#7096](https://github.com/kubernetes/minikube/pull/7096) * Strip the version prefix before calling semver [#7054](https://github.com/kubernetes/minikube/pull/7054) * Move some of the driver validation before driver selection [#7080](https://github.com/kubernetes/minikube/pull/7080) * Fix bug where global config memory was ignored [#7082](https://github.com/kubernetes/minikube/pull/7082) @@ -23,12 +27,12 @@ Huge thank you for this release towards our contributors: - Anders F Björklund - Medya Ghazizadeh - Priya Wadhwa +- RA489 - Richard Wall - Sharif Elgamal - Thomas Strömberg - Vikky Omkar - jumahmohammad -- vikkyomkar ## Version 1.8.2 - 2020-03-13 From 1cebb31557b5ead8132b5b8a522bd3b6ad792945 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20F=20Bj=C3=B6rklund?= Date: Wed, 18 Mar 2020 21:24:41 +0100 Subject: [PATCH 095/668] Upgrade buildroot minor version --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 6a3b3face8..f1256f8eb5 100755 --- a/Makefile +++ b/Makefile @@ -32,7 +32,7 @@ RPM_VERSION ?= $(DEB_VERSION) GO_VERSION ?= 1.13.8 INSTALL_SIZE ?= $(shell du out/minikube-windows-amd64.exe | cut -f1) -BUILDROOT_BRANCH ?= 2019.02.9 +BUILDROOT_BRANCH ?= 2019.02.10 
REGISTRY?=gcr.io/k8s-minikube # Get git commit id @@ -52,7 +52,7 @@ MINIKUBE_BUCKET ?= minikube/releases MINIKUBE_UPLOAD_LOCATION := gs://${MINIKUBE_BUCKET} MINIKUBE_RELEASES_URL=https://github.com/kubernetes/minikube/releases/download -KERNEL_VERSION ?= 4.19.94 +KERNEL_VERSION ?= 4.19.107 # latest from https://github.com/golangci/golangci-lint/releases GOLINT_VERSION ?= v1.23.6 # Limit number of default jobs, to avoid the CI builds running out of memory From 3dfb0ae98d22d69ef2295a890f3a5c28dad45719 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Wed, 18 Mar 2020 13:50:24 -0700 Subject: [PATCH 096/668] check if preloaded tarball was downloaded if preload exists --- test/integration/aaa_download_only_test.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/test/integration/aaa_download_only_test.go b/test/integration/aaa_download_only_test.go index 2c2210b590..3eda60e8cf 100644 --- a/test/integration/aaa_download_only_test.go +++ b/test/integration/aaa_download_only_test.go @@ -51,7 +51,8 @@ func TestDownloadOnly(t *testing.T) { t.Run("group", func(t *testing.T) { versions := []string{ constants.OldestKubernetesVersion, - "v1.12.0", + constants.DefaultKubernetesVersion, + constants.NewestKubernetesVersion, } for _, v := range versions { t.Run(v, func(t *testing.T) { @@ -70,6 +71,14 @@ func TestDownloadOnly(t *testing.T) { t.Errorf("%s failed: %v", args, err) } + if download.PreloadExists(v, "docker") { + // Just make sure the tarball path exists + if _, err := os.Stat(download.TarballPath(v)); err != nil { + t.Errorf("preloaded tarball path doesn't exist: %v", err) + } + return + } + imgs, err := images.Kubeadm("", v) if err != nil { t.Errorf("kubeadm images: %v %+v", v, err) From c13d7bafebf9c379f7f4debde164908d2cbf6779 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 18 Mar 2020 14:51:54 -0700 Subject: [PATCH 097/668] Detect AnonymousAuth issues, show the error log on console --- .../bootstrapper/bsutil/kverify/kverify.go | 
23 +++++++++++++++++-- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 8 ++++++- pkg/minikube/logs/logs.go | 2 +- pkg/minikube/logs/logs_test.go | 1 + 4 files changed, 30 insertions(+), 4 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go index aa076cecb9..54f17565e1 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go @@ -34,7 +34,10 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants" + "k8s.io/minikube/pkg/minikube/bootstrapper" "k8s.io/minikube/pkg/minikube/command" + "k8s.io/minikube/pkg/minikube/cruntime" + "k8s.io/minikube/pkg/minikube/logs" ) // APIServerProcess waits for api server to be healthy returns error if it doesn't @@ -94,15 +97,27 @@ func SystemPods(client *kubernetes.Clientset, start time.Time, timeout time.Dura return nil } -// APIServerIsRunning waits for api server status to be running -func APIServerIsRunning(start time.Time, ip string, port int, timeout time.Duration) error { +// WaitForHealthyAPIServer waits for api server status to be running +func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr command.Runner, start time.Time, ip string, port int, timeout time.Duration) error { glog.Infof("waiting for apiserver healthz status ...") hStart := time.Now() + + minLogTime := kconst.APICallRetryInterval * 10 healthz := func() (bool, error) { if time.Since(start) > timeout { return false, fmt.Errorf("cluster wait timed out during healthz check") } + // We're probably not going to recover, so show problems and slow polling + if time.Since(start) > minLogTime { + problems := logs.FindProblems(r, bs, cr) + if len(problems) > 0 { + logs.OutputProblems(problems, 5) + time.Sleep(kconst.APICallRetryInterval * 15) + } + time.Sleep(kconst.APICallRetryInterval * 5) + } + status, err := 
apiServerHealthz(net.ParseIP(ip), port) if err != nil { glog.Warningf("status: %v", err) @@ -175,6 +190,10 @@ func apiServerHealthz(ip net.IP, port int) (state.State, error) { if err != nil { return state.Stopped, nil } + if resp.StatusCode == http.StatusUnauthorized { + glog.Errorf("%s returned code %d (unauthorized). Please ensure that your apiserver authorization settings make sense!", url, resp.StatusCode) + return state.Error, nil + } if resp.StatusCode != http.StatusOK { glog.Warningf("%s response: %v %+v", url, err, resp) return state.Error, nil diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 8188b5af5a..d8be5c4d52 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -273,7 +273,13 @@ func (k *Bootstrapper) WaitForCluster(cfg config.ClusterConfig, timeout time.Dur return errors.Wrapf(err, "get host-bind port %d for container %s", port, cfg.Name) } } - if err := kverify.APIServerIsRunning(start, ip, port, timeout); err != nil { + + cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c}) + if err != nil { + return err + } + + if err := kverify.WaitForHealthyAPIServer(cr, k, k.c, start, ip, port, timeout); err != nil { return err } diff --git a/pkg/minikube/logs/logs.go b/pkg/minikube/logs/logs.go index ef26e9d7d7..fb833bca2b 100644 --- a/pkg/minikube/logs/logs.go +++ b/pkg/minikube/logs/logs.go @@ -36,7 +36,7 @@ import ( ) // rootCauseRe is a regular expression that matches known failure root causes -var rootCauseRe = regexp.MustCompile(`^error: |eviction manager: pods.* evicted|unknown flag: --|forbidden.*no providers available|eviction manager:.*evicted|tls: bad certificate|kubelet.*no API client|kubelet.*No api server|STDIN.*127.0.0.1:8080|failed to create listener|address already in use|unable to evict any pods|eviction manager: unexpected error`) +var rootCauseRe = regexp.MustCompile(`^error: 
|eviction manager: pods.* evicted|unknown flag: --|forbidden.*no providers available|eviction manager:.*evicted|tls: bad certificate|kubelet.*no API client|kubelet.*No api server|STDIN.*127.0.0.1:8080|failed to create listener|address already in use|unable to evict any pods|eviction manager: unexpected error|Resetting AnonymousAuth to false`) // ignoreCauseRe is a regular expression that matches spurious errors to not surface var ignoreCauseRe = regexp.MustCompile("error: no objects passed to apply") diff --git a/pkg/minikube/logs/logs_test.go b/pkg/minikube/logs/logs_test.go index bfb8b14b55..8af8612e27 100644 --- a/pkg/minikube/logs/logs_test.go +++ b/pkg/minikube/logs/logs_test.go @@ -36,6 +36,7 @@ func TestIsProblem(t *testing.T) { {"no-objects-passed-to-apply #4010", false, "error: no objects passed to apply"}, {"bad-certificate #4251", true, "log.go:172] http: TLS handshake error from 127.0.0.1:49200: remote error: tls: bad certificate"}, {"ephemeral-eviction #5355", true, " eviction_manager.go:419] eviction manager: unexpected error when attempting to reduce ephemeral-storage pressure: wanted to free 9223372036854775807 bytes, but freed 0 bytes space with errors in image deletion"}, + {"anonymous-auth", true, "AnonymousAuth is not allowed with the AlwaysAllow authorizer. Resetting AnonymousAuth to false. 
You should use a different authorizer"}, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { From a24aa5dff72fcff3dfe4be0a5620d8131d96fd88 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 18 Mar 2020 15:04:56 -0700 Subject: [PATCH 098/668] dramatically simplify start code path --- cmd/minikube/cmd/cache.go | 4 +- cmd/minikube/cmd/kubectl.go | 4 +- cmd/minikube/cmd/node_start.go | 2 +- cmd/minikube/cmd/start.go | 6 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 8 +- pkg/minikube/cluster/cache.go | 168 ------- pkg/minikube/cluster/setup.go | 422 ------------------ pkg/minikube/node/node.go | 2 +- pkg/minikube/node/start.go | 443 +++++++++++++++++-- 9 files changed, 409 insertions(+), 650 deletions(-) delete mode 100644 pkg/minikube/cluster/cache.go delete mode 100644 pkg/minikube/cluster/setup.go diff --git a/cmd/minikube/cmd/cache.go b/cmd/minikube/cmd/cache.go index ab1f075853..eb91371984 100644 --- a/cmd/minikube/cmd/cache.go +++ b/cmd/minikube/cmd/cache.go @@ -19,10 +19,10 @@ package cmd import ( "github.com/spf13/cobra" cmdConfig "k8s.io/minikube/cmd/minikube/cmd/config" - "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/image" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/node" ) // cacheImageConfigKey is the config field name used to store which images we have previously cached @@ -75,7 +75,7 @@ var reloadCacheCmd = &cobra.Command{ Short: "reload cached images.", Long: "reloads images previously added using the 'cache add' subcommand", Run: func(cmd *cobra.Command, args []string) { - err := cluster.CacheAndLoadImagesInConfig() + err := node.CacheAndLoadImagesInConfig() if err != nil { exit.WithError("Failed to reload cached images", err) } diff --git a/cmd/minikube/cmd/kubectl.go b/cmd/minikube/cmd/kubectl.go index e5520d8153..e24943a7d4 100644 --- a/cmd/minikube/cmd/kubectl.go +++ b/cmd/minikube/cmd/kubectl.go @@ -25,10 +25,10 @@ import ( 
"github.com/golang/glog" "github.com/spf13/cobra" "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" ) @@ -59,7 +59,7 @@ minikube kubectl -- get pods --namespace kube-system`, version = cc.KubernetesConfig.KubernetesVersion } - path, err := cluster.CacheKubectlBinary(version) + path, err := node.CacheKubectlBinary(version) if err != nil { out.ErrLn("Error caching kubectl: %v", err) } diff --git a/cmd/minikube/cmd/node_start.go b/cmd/minikube/cmd/node_start.go index 758b60b7a3..17e3da8694 100644 --- a/cmd/minikube/cmd/node_start.go +++ b/cmd/minikube/cmd/node_start.go @@ -61,7 +61,7 @@ var nodeStartCmd = &cobra.Command{ } // Start it up baby - node.Start(*cc, *n, nil) + node.Start(*cc, *n, nil, false) }, } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 6b063a88ba..9ab8a0e730 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -45,7 +45,6 @@ import ( "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil" "k8s.io/minikube/pkg/minikube/bootstrapper/images" - "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" @@ -348,10 +347,7 @@ func runStart(cmd *cobra.Command, args []string) { } } - kubeconfig, err := cluster.InitialSetup(cc, n, existingAddons) - if err != nil { - exit.WithError("Starting node", err) - } + kubeconfig := node.Start(cc, n, existingAddons, true) numNodes := viper.GetInt(nodes) if numNodes > 1 { diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 0863688ea8..db8d9c3718 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -212,6 +212,10 @@ func (k 
*Bootstrapper) StartCluster(cfg config.ClusterConfig) error { return errors.Wrap(err, "setting up node") } + if err := k.applyNodeLabels(cfg); err != nil { + glog.Warningf("unable to apply node labels: %v", err) + } + if err := bsutil.AdjustResourceLimits(k.c); err != nil { glog.Warningf("unable to adjust resource limits: %v", err) } @@ -231,10 +235,6 @@ func (k *Bootstrapper) SetupNode(cfg config.ClusterConfig) error { } } - if err := k.applyNodeLabels(cfg); err != nil { - glog.Warningf("unable to apply node labels: %v", err) - } - return nil } diff --git a/pkg/minikube/cluster/cache.go b/pkg/minikube/cluster/cache.go deleted file mode 100644 index 6fcf303f27..0000000000 --- a/pkg/minikube/cluster/cache.go +++ /dev/null @@ -1,168 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cluster - -import ( - "os" - "runtime" - - "github.com/golang/glog" - "github.com/spf13/viper" - "golang.org/x/sync/errgroup" - cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" - "k8s.io/minikube/pkg/drivers/kic" - "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/constants" - "k8s.io/minikube/pkg/minikube/download" - "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/image" - "k8s.io/minikube/pkg/minikube/localpath" - "k8s.io/minikube/pkg/minikube/machine" - "k8s.io/minikube/pkg/minikube/out" -) - -const ( - cacheImages = "cache-images" - cacheImageConfigKey = "cache" -) - -// BeginCacheKubernetesImages caches images required for kubernetes version in the background -func BeginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVersion string, cRuntime string) { - if download.PreloadExists(k8sVersion, cRuntime) { - glog.Info("Caching tarball of preloaded images") - err := download.Preload(k8sVersion, cRuntime) - if err == nil { - glog.Infof("Finished downloading the preloaded tar for %s on %s", k8sVersion, cRuntime) - return // don't cache individual images if preload is successful. - } - glog.Warningf("Error downloading preloaded artifacts will continue without preload: %v", err) - } - - if !viper.GetBool(cacheImages) { - return - } - - g.Go(func() error { - return machine.CacheImagesForBootstrapper(imageRepository, k8sVersion, viper.GetString(cmdcfg.Bootstrapper)) - }) -} - -// HandleDownloadOnly caches appropariate binaries and images -func HandleDownloadOnly(cacheGroup, kicGroup *errgroup.Group, k8sVersion string) { - // If --download-only, complete the remaining downloads and exit. 
- if !viper.GetBool("download-only") { - return - } - if err := doCacheBinaries(k8sVersion); err != nil { - exit.WithError("Failed to cache binaries", err) - } - if _, err := CacheKubectlBinary(k8sVersion); err != nil { - exit.WithError("Failed to cache kubectl", err) - } - WaitCacheRequiredImages(cacheGroup) - WaitDownloadKicArtifacts(kicGroup) - if err := saveImagesToTarFromConfig(); err != nil { - exit.WithError("Failed to cache images to tar", err) - } - out.T(out.Check, "Download complete!") - os.Exit(0) - -} - -// CacheKubectlBinary caches the kubectl binary -func CacheKubectlBinary(k8sVerison string) (string, error) { - binary := "kubectl" - if runtime.GOOS == "windows" { - binary = "kubectl.exe" - } - - return download.Binary(binary, k8sVerison, runtime.GOOS, runtime.GOARCH) -} - -// doCacheBinaries caches Kubernetes binaries in the foreground -func doCacheBinaries(k8sVersion string) error { - return machine.CacheBinariesForBootstrapper(k8sVersion, viper.GetString(cmdcfg.Bootstrapper)) -} - -// BeginDownloadKicArtifacts downloads the kic image + preload tarball, returns true if preload is available -func BeginDownloadKicArtifacts(g *errgroup.Group) { - glog.Info("Beginning downloading kic artifacts") - g.Go(func() error { - glog.Infof("Downloading %s to local daemon", kic.BaseImage) - return image.WriteImageToDaemon(kic.BaseImage) - }) -} - -// WaitDownloadKicArtifacts blocks until the required artifacts for KIC are downloaded. -func WaitDownloadKicArtifacts(g *errgroup.Group) { - if err := g.Wait(); err != nil { - glog.Errorln("Error downloading kic artifacts: ", err) - return - } - glog.Info("Successfully downloaded all kic artifacts") -} - -// WaitCacheRequiredImages blocks until the required images are all cached. 
-func WaitCacheRequiredImages(g *errgroup.Group) { - if !viper.GetBool(cacheImages) { - return - } - if err := g.Wait(); err != nil { - glog.Errorln("Error caching images: ", err) - } -} - -// saveImagesToTarFromConfig saves images to tar in cache which specified in config file. -// currently only used by download-only option -func saveImagesToTarFromConfig() error { - images, err := imagesInConfigFile() - if err != nil { - return err - } - if len(images) == 0 { - return nil - } - return image.SaveToDir(images, constants.ImageCacheDir) -} - -// CacheAndLoadImagesInConfig loads the images currently in the config file -// called by 'start' and 'cache reload' commands. -func CacheAndLoadImagesInConfig() error { - images, err := imagesInConfigFile() - if err != nil { - return err - } - if len(images) == 0 { - return nil - } - return machine.CacheAndLoadImages(images) -} - -func imagesInConfigFile() ([]string, error) { - configFile, err := config.ReadConfig(localpath.ConfigFile()) - if err != nil { - return nil, err - } - if values, ok := configFile[cacheImageConfigKey]; ok { - var images []string - for key := range values.(map[string]interface{}) { - images = append(images, key) - } - return images, nil - } - return []string{}, nil -} diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go deleted file mode 100644 index 513154ccce..0000000000 --- a/pkg/minikube/cluster/setup.go +++ /dev/null @@ -1,422 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import ( - "fmt" - "net" - "os" - "os/exec" - "strconv" - "strings" - "time" - - "github.com/blang/semver" - "github.com/docker/machine/libmachine" - "github.com/docker/machine/libmachine/host" - "github.com/golang/glog" - "github.com/pkg/errors" - "github.com/spf13/viper" - "golang.org/x/sync/errgroup" - cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" - "k8s.io/minikube/pkg/addons" - "k8s.io/minikube/pkg/drivers/kic/oci" - "k8s.io/minikube/pkg/minikube/bootstrapper" - "k8s.io/minikube/pkg/minikube/bootstrapper/images" - "k8s.io/minikube/pkg/minikube/command" - "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/constants" - "k8s.io/minikube/pkg/minikube/cruntime" - "k8s.io/minikube/pkg/minikube/driver" - "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/kubeconfig" - "k8s.io/minikube/pkg/minikube/localpath" - "k8s.io/minikube/pkg/minikube/logs" - "k8s.io/minikube/pkg/minikube/machine" - "k8s.io/minikube/pkg/minikube/out" - "k8s.io/minikube/pkg/minikube/proxy" - "k8s.io/minikube/pkg/util" - "k8s.io/minikube/pkg/util/retry" -) - -const ( - waitTimeout = "wait-timeout" - waitUntilHealthy = "wait" - embedCerts = "embed-certs" - keepContext = "keep-context" - imageRepository = "image-repository" - containerRuntime = "container-runtime" -) - -// InitialSetup performs all necessary operations on the initial control plane node when first spinning up a cluster -func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) (*kubeconfig.Settings, error) { - var kicGroup errgroup.Group - if driver.IsKIC(cc.Driver) { - BeginDownloadKicArtifacts(&kicGroup) - } - - var cacheGroup errgroup.Group - if !driver.BareMetal(cc.Driver) { - BeginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) - } - - // Abstraction 
leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot. - // Hence, saveConfig must be called before startHost, and again afterwards when we know the IP. - if err := config.SaveProfile(viper.GetString(config.ProfileName), &cc); err != nil { - exit.WithError("Failed to save config", err) - } - - HandleDownloadOnly(&cacheGroup, &kicGroup, n.KubernetesVersion) - WaitDownloadKicArtifacts(&kicGroup) - - mRunner, preExists, machineAPI, host := StartMachine(&cc, &n) - defer machineAPI.Close() - - // wait for preloaded tarball to finish downloading before configuring runtimes - WaitCacheRequiredImages(&cacheGroup) - - sv, err := util.ParseKubernetesVersion(n.KubernetesVersion) - if err != nil { - return nil, err - } - - // configure the runtime (docker, containerd, crio) - cr := ConfigureRuntimes(mRunner, cc.Driver, cc.KubernetesConfig, sv) - - // Must be written before bootstrap, otherwise health checks may flake due to stale IP - kubeconfig, err := setupKubeconfig(host, &cc, &n, cc.Name) - if err != nil { - exit.WithError("Failed to setup kubeconfig", err) - } - - // setup kubeadm (must come after setupKubeconfig) - bs := setupKubeAdm(machineAPI, cc, n) - - // pull images or restart cluster - out.T(out.Launch, "Launching Kubernetes ... ") - err = bs.StartCluster(cc) - if err != nil { - exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner)) - } - //configureMounts() - - if err := CacheAndLoadImagesInConfig(); err != nil { - out.T(out.FailureType, "Unable to load cached images from config file.") - } - - // enable addons, both old and new! - if existingAddons != nil { - addons.Start(viper.GetString(config.ProfileName), existingAddons, config.AddonList) - } - - // special ops for none , like change minikube directory. 
- // multinode super doesn't work on the none driver - if cc.Driver == driver.None && len(cc.Nodes) == 1 { - prepareNone() - } - - // Skip pre-existing, because we already waited for health - if viper.GetBool(waitUntilHealthy) && !preExists { - if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil { - exit.WithError("Wait failed", err) - } - } - - return kubeconfig, nil - -} - -// ConfigureRuntimes does what needs to happen to get a runtime going. -func ConfigureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig, kv semver.Version) cruntime.Manager { - co := cruntime.Config{ - Type: viper.GetString(containerRuntime), - Runner: runner, ImageRepository: k8s.ImageRepository, - KubernetesVersion: kv, - } - cr, err := cruntime.New(co) - if err != nil { - exit.WithError("Failed runtime", err) - } - - disableOthers := true - if driver.BareMetal(drvName) { - disableOthers = false - } - - // Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere. - if driver.IsVM(drvName) { - if err := cr.Preload(k8s); err != nil { - switch err.(type) { - case *cruntime.ErrISOFeature: - out.T(out.Tip, "Existing disk is missing new features ({{.error}}). 
To upgrade, run 'minikube delete'", out.V{"error": err}) - default: - glog.Warningf("%s preload failed: %v, falling back to caching images", cr.Name(), err) - } - - if err := machine.CacheImagesForBootstrapper(k8s.ImageRepository, k8s.KubernetesVersion, viper.GetString(cmdcfg.Bootstrapper)); err != nil { - exit.WithError("Failed to cache images", err) - } - } - } - - err = cr.Enable(disableOthers) - if err != nil { - exit.WithError("Failed to enable container runtime", err) - } - - return cr -} - -// setupKubeAdm adds any requested files into the VM before Kubernetes is started -func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) bootstrapper.Bootstrapper { - bs, err := Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg, n) - if err != nil { - exit.WithError("Failed to get bootstrapper", err) - } - for _, eo := range config.ExtraOptions { - out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value}) - } - // Loads cached images, generates config files, download binaries - if err := bs.UpdateCluster(cfg); err != nil { - exit.WithError("Failed to update cluster", err) - } - if err := bs.SetupCerts(cfg.KubernetesConfig, n); err != nil { - exit.WithError("Failed to setup certs", err) - } - return bs -} - -func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { - addr, err := apiServerURL(*h, *cc, *n) - if err != nil { - exit.WithError("Failed to get API Server URL", err) - } - - if cc.KubernetesConfig.APIServerName != constants.APIServerName { - addr = strings.Replace(addr, n.IP, cc.KubernetesConfig.APIServerName, -1) - } - kcs := &kubeconfig.Settings{ - ClusterName: clusterName, - ClusterServerAddress: addr, - ClientCertificate: localpath.MakeMiniPath("client.crt"), - ClientKey: localpath.MakeMiniPath("client.key"), - CertificateAuthority: 
localpath.MakeMiniPath("ca.crt"), - KeepContext: viper.GetBool(keepContext), - EmbedCerts: viper.GetBool(embedCerts), - } - - kcs.SetPath(kubeconfig.PathFromEnv()) - if err := kubeconfig.Update(kcs); err != nil { - return kcs, err - } - return kcs, nil -} - -func apiServerURL(h host.Host, cc config.ClusterConfig, n config.Node) (string, error) { - hostname := "" - port := n.Port - var err error - if driver.IsKIC(h.DriverName) { - // for kic drivers we use 127.0.0.1 instead of node IP, - // because of Docker on MacOs limitations for reaching to container's IP. - hostname = oci.DefaultBindIPV4 - port, err = oci.ForwardedPort(h.DriverName, h.Name, port) - if err != nil { - return "", errors.Wrap(err, "host port binding") - } - } else { - hostname, err = h.Driver.GetIP() - if err != nil { - return "", errors.Wrap(err, "get ip") - } - } - - if cc.KubernetesConfig.APIServerName != constants.APIServerName { - hostname = cc.KubernetesConfig.APIServerName - } - return fmt.Sprintf("https://" + net.JoinHostPort(hostname, strconv.Itoa(port))), nil -} - -// StartMachine starts a VM -func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { - m, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Failed to get machine client", err) - } - host, preExists = startHost(m, *cfg, *node) - runner, err = machine.CommandRunner(host) - if err != nil { - exit.WithError("Failed to get command runner", err) - } - - ip := validateNetwork(host, runner) - - // Bypass proxy for minikube's vm host ip - err = proxy.ExcludeIP(ip) - if err != nil { - out.ErrT(out.FailureType, "Failed to set NO_PROXY Env. 
Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip}) - } - - // Save IP to config file for subsequent use - node.IP = ip - err = config.SaveNode(cfg, node) - if err != nil { - exit.WithError("saving node", err) - } - - return runner, preExists, m, host -} - -// startHost starts a new minikube host using a VM or None -func startHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, bool) { - host, exists, err := machine.StartHost(api, mc, n) - if err != nil { - exit.WithError("Unable to start VM. Please investigate and run 'minikube delete' if possible", err) - } - return host, exists -} - -// validateNetwork tries to catch network problems as soon as possible -func validateNetwork(h *host.Host, r command.Runner) string { - ip, err := h.Driver.GetIP() - if err != nil { - exit.WithError("Unable to get VM IP address", err) - } - - optSeen := false - warnedOnce := false - for _, k := range proxy.EnvVars { - if v := os.Getenv(k); v != "" { - if !optSeen { - out.T(out.Internet, "Found network options:") - optSeen = true - } - out.T(out.Option, "{{.key}}={{.value}}", out.V{"key": k, "value": v}) - ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY - k = strings.ToUpper(k) // for http_proxy & https_proxy - if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce { - out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). 
Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"}) - warnedOnce = true - } - } - } - - if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) { - trySSH(h, ip) - } - - tryLookup(r) - tryRegistry(r) - return ip -} - -func trySSH(h *host.Host, ip string) { - if viper.GetBool("force") { - return - } - - sshAddr := net.JoinHostPort(ip, "22") - - dial := func() (err error) { - d := net.Dialer{Timeout: 3 * time.Second} - conn, err := d.Dial("tcp", sshAddr) - if err != nil { - out.WarningT("Unable to verify SSH connectivity: {{.error}}. Will retry...", out.V{"error": err}) - return err - } - _ = conn.Close() - return nil - } - - if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil { - exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}} - - This is likely due to one of two reasons: - - - VPN or firewall interference - - {{.hypervisor}} network configuration issue - - Suggested workarounds: - - - Disable your local VPN or firewall software - - Configure your local VPN or firewall to allow access to {{.ip}} - - Restart or reinstall {{.hypervisor}} - - Use an alternative --vm-driver - - Use --force to override this connectivity check - `, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip}) - } -} - -func tryLookup(r command.Runner) { - // DNS check - if rr, err := r.RunCmd(exec.Command("nslookup", "kubernetes.io", "-type=ns")); err != nil { - glog.Infof("%s failed: %v which might be okay will retry nslookup without query type", rr.Args, err) - // will try with without query type for ISOs with different busybox versions. 
- if _, err = r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil { - glog.Warningf("nslookup failed: %v", err) - out.WarningT("Node may be unable to resolve external DNS records") - } - } -} -func tryRegistry(r command.Runner) { - // Try an HTTPS connection to the image repository - proxy := os.Getenv("HTTPS_PROXY") - opts := []string{"-sS"} - if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") { - opts = append([]string{"-x", proxy}, opts...) - } - - repo := viper.GetString(imageRepository) - if repo == "" { - repo = images.DefaultKubernetesRepo - } - - opts = append(opts, fmt.Sprintf("https://%s/", repo)) - if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil { - glog.Warningf("%s failed: %v", rr.Args, err) - out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo}) - } -} - -// prepareNone prepares the user and host for the joy of the "none" driver -func prepareNone() { - out.T(out.StartingNone, "Configuring local host environment ...") - if viper.GetBool(config.WantNoneDriverWarning) { - out.T(out.Empty, "") - out.WarningT("The 'none' driver provides limited isolation and may reduce system security and reliability.") - out.WarningT("For more information, see:") - out.T(out.URL, "https://minikube.sigs.k8s.io/docs/reference/drivers/none/") - out.T(out.Empty, "") - } - - if os.Getenv("CHANGE_MINIKUBE_NONE_USER") == "" { - home := os.Getenv("HOME") - out.WarningT("kubectl and minikube configuration will be stored in {{.home_folder}}", out.V{"home_folder": home}) - out.WarningT("To use kubectl or minikube commands as your own user, you may need to relocate them. 
For example, to overwrite your own settings, run:") - - out.T(out.Empty, "") - out.T(out.Command, "sudo mv {{.home_folder}}/.kube {{.home_folder}}/.minikube $HOME", out.V{"home_folder": home}) - out.T(out.Command, "sudo chown -R $USER $HOME/.kube $HOME/.minikube") - out.T(out.Empty, "") - - out.T(out.Tip, "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true") - } - - if err := util.MaybeChownDirRecursiveToMinikubeUser(localpath.MiniPath()); err != nil { - exit.WithCodeT(exit.Permissions, "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}", out.V{"minikube_dir_path": localpath.MiniPath(), "error": err}) - } -} diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 7458ff80a3..55b2fdf298 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -39,7 +39,7 @@ func Add(cc *config.ClusterConfig, n config.Node) error { return err } - Start(*cc, n, nil) + Start(*cc, n, nil, false) return nil } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 0a5e3fc095..6f9b441366 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -17,94 +17,447 @@ limitations under the License. 
package node import ( + "fmt" + "net" + "os" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/blang/semver" + "github.com/docker/machine/libmachine" + "github.com/docker/machine/libmachine/host" + "github.com/golang/glog" + "github.com/pkg/errors" "github.com/spf13/viper" "golang.org/x/sync/errgroup" cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/addons" + "k8s.io/minikube/pkg/drivers/kic/oci" + "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/cluster" + "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" + "k8s.io/minikube/pkg/minikube/kubeconfig" + "k8s.io/minikube/pkg/minikube/localpath" + "k8s.io/minikube/pkg/minikube/logs" + "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/proxy" "k8s.io/minikube/pkg/util" + "k8s.io/minikube/pkg/util/retry" +) + +const ( + waitTimeout = "wait-timeout" + waitUntilHealthy = "wait" + embedCerts = "embed-certs" + keepContext = "keep-context" + imageRepository = "image-repository" + containerRuntime = "container-runtime" ) // Start spins up a guest and starts the kubernetes node. -func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) { - // Now that the ISO is downloaded, pull images in the background while the VM boots. 
- var cacheGroup errgroup.Group - if !driver.BareMetal(cc.Driver) { - cluster.BeginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) - } - +func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, apiServer bool) *kubeconfig.Settings { var kicGroup errgroup.Group if driver.IsKIC(cc.Driver) { - cluster.BeginDownloadKicArtifacts(&kicGroup) + beginDownloadKicArtifacts(&kicGroup) } - runner, _, mAPI, _ := cluster.StartMachine(&cc, &n) - defer mAPI.Close() - - bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cc, n) - if err != nil { - exit.WithError("Failed to get bootstrapper", err) + var cacheGroup errgroup.Group + if !driver.BareMetal(cc.Driver) { + beginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) } - k8sVersion := n.KubernetesVersion - driverName := cc.Driver - // exits here in case of --download-only option. - cluster.HandleDownloadOnly(&cacheGroup, &kicGroup, k8sVersion) - cluster.WaitDownloadKicArtifacts(&kicGroup) + // Abstraction leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot. + // Hence, saveConfig must be called before startHost, and again afterwards when we know the IP. 
+ if err := config.SaveProfile(viper.GetString(config.ProfileName), &cc); err != nil { + exit.WithError("Failed to save config", err) + } + + handleDownloadOnly(&cacheGroup, &kicGroup, n.KubernetesVersion) + waitDownloadKicArtifacts(&kicGroup) + + mRunner, preExists, machineAPI, host := startMachine(&cc, &n) + defer machineAPI.Close() // wait for preloaded tarball to finish downloading before configuring runtimes - cluster.WaitCacheRequiredImages(&cacheGroup) + waitCacheRequiredImages(&cacheGroup) - sv, err := util.ParseKubernetesVersion(cc.KubernetesConfig.KubernetesVersion) + sv, err := util.ParseKubernetesVersion(n.KubernetesVersion) if err != nil { exit.WithError("Failed to parse kubernetes version", err) } // configure the runtime (docker, containerd, crio) - cr := cluster.ConfigureRuntimes(runner, driverName, cc.KubernetesConfig, sv) - showVersionInfo(k8sVersion, cr) + cr := configureRuntimes(mRunner, cc.Driver, cc.KubernetesConfig, sv) + showVersionInfo(n.KubernetesVersion, cr) + + var bs bootstrapper.Bootstrapper + var kubeconfig *kubeconfig.Settings + if apiServer { + // Must be written before bootstrap, otherwise health checks may flake due to stale IP + kubeconfig, err = setupKubeconfig(host, &cc, &n, cc.Name) + if err != nil { + exit.WithError("Failed to setup kubeconfig", err) + } + + // setup kubeadm (must come after setupKubeconfig) + bs = setupKubeAdm(machineAPI, cc, n) + err = bs.StartCluster(cc) + if err != nil { + exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner)) + } + } else { + bs, err = cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, n) + if err != nil { + exit.WithError("Failed to get bootstrapper", err) + } + + if err = bs.SetupCerts(cc.KubernetesConfig, n); err != nil { + exit.WithError("setting up certs", err) + } + + if err = bs.SetupNode(cc); err != nil { + exit.WithError("Failed to setup node", err) + } + } configureMounts() + if err := CacheAndLoadImagesInConfig(); err != 
nil { + out.T(out.FailureType, "Unable to load cached images from config file.") + } + // enable addons, both old and new! if existingAddons != nil { addons.Start(viper.GetString(config.ProfileName), existingAddons, config.AddonList) } - if err := bs.UpdateNode(cc, n, cr); err != nil { - exit.WithError("Failed to update node", err) + if apiServer { + // special ops for none , like change minikube directory. + // multinode super doesn't work on the none driver + if cc.Driver == driver.None && len(cc.Nodes) == 1 { + prepareNone() + } + + // Skip pre-existing, because we already waited for health + if viper.GetBool(waitUntilHealthy) && !preExists { + if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil { + exit.WithError("Wait failed", err) + } + } + } else { + if err := bs.UpdateNode(cc, n, cr); err != nil { + exit.WithError("Updating node", err) + } + + cp, err := config.PrimaryControlPlane(&cc) + if err != nil { + exit.WithError("Getting primary control plane", err) + } + cpBs, err := cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, cp) + if err != nil { + exit.WithError("Getting bootstrapper", err) + } + + joinCmd, err := cpBs.GenerateToken(cc) + if err != nil { + exit.WithError("generating join token", err) + } + + if err = bs.JoinCluster(cc, n, joinCmd); err != nil { + exit.WithError("joining cluster", err) + } } - if err := cluster.CacheAndLoadImagesInConfig(); err != nil { - exit.WithError("Unable to load cached images from config file.", err) - } + return kubeconfig - if err = bs.SetupCerts(cc.KubernetesConfig, n); err != nil { - exit.WithError("setting up certs", err) - } +} - if err = bs.SetupNode(cc); err != nil { - exit.WithError("Failed to setup node", err) +// ConfigureRuntimes does what needs to happen to get a runtime going. 
+func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig, kv semver.Version) cruntime.Manager { + co := cruntime.Config{ + Type: viper.GetString(containerRuntime), + Runner: runner, ImageRepository: k8s.ImageRepository, + KubernetesVersion: kv, } - - cp, err := config.PrimaryControlPlane(&cc) + cr, err := cruntime.New(co) if err != nil { - exit.WithError("Getting primary control plane", err) - } - cpBs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cc, cp) - if err != nil { - exit.WithError("Getting bootstrapper", err) + exit.WithError("Failed runtime", err) } - joinCmd, err := cpBs.GenerateToken(cc) - if err != nil { - exit.WithError("generating join token", err) + disableOthers := true + if driver.BareMetal(drvName) { + disableOthers = false } - if err = bs.JoinCluster(cc, n, joinCmd); err != nil { - exit.WithError("joining cluster", err) + // Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere. + if driver.IsVM(drvName) { + if err := cr.Preload(k8s); err != nil { + switch err.(type) { + case *cruntime.ErrISOFeature: + out.T(out.Tip, "Existing disk is missing new features ({{.error}}). 
To upgrade, run 'minikube delete'", out.V{"error": err}) + default: + glog.Warningf("%s preload failed: %v, falling back to caching images", cr.Name(), err) + } + + if err := machine.CacheImagesForBootstrapper(k8s.ImageRepository, k8s.KubernetesVersion, viper.GetString(cmdcfg.Bootstrapper)); err != nil { + exit.WithError("Failed to cache images", err) + } + } + } + + err = cr.Enable(disableOthers) + if err != nil { + exit.WithError("Failed to enable container runtime", err) + } + + return cr +} + +// setupKubeAdm adds any requested files into the VM before Kubernetes is started +func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) bootstrapper.Bootstrapper { + bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg, n) + if err != nil { + exit.WithError("Failed to get bootstrapper", err) + } + for _, eo := range config.ExtraOptions { + out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value}) + } + // Loads cached images, generates config files, download binaries + if err := bs.UpdateCluster(cfg); err != nil { + exit.WithError("Failed to update cluster", err) + } + if err := bs.SetupCerts(cfg.KubernetesConfig, n); err != nil { + exit.WithError("Failed to setup certs", err) + } + return bs +} + +func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { + addr, err := apiServerURL(*h, *cc, *n) + if err != nil { + exit.WithError("Failed to get API Server URL", err) + } + + if cc.KubernetesConfig.APIServerName != constants.APIServerName { + addr = strings.Replace(addr, n.IP, cc.KubernetesConfig.APIServerName, -1) + } + kcs := &kubeconfig.Settings{ + ClusterName: clusterName, + ClusterServerAddress: addr, + ClientCertificate: localpath.MakeMiniPath("client.crt"), + ClientKey: localpath.MakeMiniPath("client.key"), + CertificateAuthority: 
localpath.MakeMiniPath("ca.crt"), + KeepContext: viper.GetBool(keepContext), + EmbedCerts: viper.GetBool(embedCerts), + } + + kcs.SetPath(kubeconfig.PathFromEnv()) + if err := kubeconfig.Update(kcs); err != nil { + return kcs, err + } + return kcs, nil +} + +func apiServerURL(h host.Host, cc config.ClusterConfig, n config.Node) (string, error) { + hostname := "" + port := n.Port + var err error + if driver.IsKIC(h.DriverName) { + // for kic drivers we use 127.0.0.1 instead of node IP, + // because of Docker on MacOs limitations for reaching to container's IP. + hostname = oci.DefaultBindIPV4 + port, err = oci.ForwardedPort(h.DriverName, h.Name, port) + if err != nil { + return "", errors.Wrap(err, "host port binding") + } + } else { + hostname, err = h.Driver.GetIP() + if err != nil { + return "", errors.Wrap(err, "get ip") + } + } + + if cc.KubernetesConfig.APIServerName != constants.APIServerName { + hostname = cc.KubernetesConfig.APIServerName + } + return fmt.Sprintf("https://" + net.JoinHostPort(hostname, strconv.Itoa(port))), nil +} + +// StartMachine starts a VM +func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { + m, err := machine.NewAPIClient() + if err != nil { + exit.WithError("Failed to get machine client", err) + } + host, preExists = startHost(m, *cfg, *node) + runner, err = machine.CommandRunner(host) + if err != nil { + exit.WithError("Failed to get command runner", err) + } + + ip := validateNetwork(host, runner) + + // Bypass proxy for minikube's vm host ip + err = proxy.ExcludeIP(ip) + if err != nil { + out.ErrT(out.FailureType, "Failed to set NO_PROXY Env. 
Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip}) + } + + // Save IP to config file for subsequent use + node.IP = ip + err = config.SaveNode(cfg, node) + if err != nil { + exit.WithError("saving node", err) + } + + return runner, preExists, m, host +} + +// startHost starts a new minikube host using a VM or None +func startHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, bool) { + host, exists, err := machine.StartHost(api, mc, n) + if err != nil { + exit.WithError("Unable to start VM. Please investigate and run 'minikube delete' if possible", err) + } + return host, exists +} + +// validateNetwork tries to catch network problems as soon as possible +func validateNetwork(h *host.Host, r command.Runner) string { + ip, err := h.Driver.GetIP() + if err != nil { + exit.WithError("Unable to get VM IP address", err) + } + + optSeen := false + warnedOnce := false + for _, k := range proxy.EnvVars { + if v := os.Getenv(k); v != "" { + if !optSeen { + out.T(out.Internet, "Found network options:") + optSeen = true + } + out.T(out.Option, "{{.key}}={{.value}}", out.V{"key": k, "value": v}) + ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY + k = strings.ToUpper(k) // for http_proxy & https_proxy + if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce { + out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). 
Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"}) + warnedOnce = true + } + } + } + + if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) { + trySSH(h, ip) + } + + tryLookup(r) + tryRegistry(r) + return ip +} + +func trySSH(h *host.Host, ip string) { + if viper.GetBool("force") { + return + } + + sshAddr := net.JoinHostPort(ip, "22") + + dial := func() (err error) { + d := net.Dialer{Timeout: 3 * time.Second} + conn, err := d.Dial("tcp", sshAddr) + if err != nil { + out.WarningT("Unable to verify SSH connectivity: {{.error}}. Will retry...", out.V{"error": err}) + return err + } + _ = conn.Close() + return nil + } + + if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil { + exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}} + + This is likely due to one of two reasons: + + - VPN or firewall interference + - {{.hypervisor}} network configuration issue + + Suggested workarounds: + + - Disable your local VPN or firewall software + - Configure your local VPN or firewall to allow access to {{.ip}} + - Restart or reinstall {{.hypervisor}} + - Use an alternative --vm-driver + - Use --force to override this connectivity check + `, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip}) + } +} + +func tryLookup(r command.Runner) { + // DNS check + if rr, err := r.RunCmd(exec.Command("nslookup", "kubernetes.io", "-type=ns")); err != nil { + glog.Infof("%s failed: %v which might be okay will retry nslookup without query type", rr.Args, err) + // will try with without query type for ISOs with different busybox versions. 
+ if _, err = r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil { + glog.Warningf("nslookup failed: %v", err) + out.WarningT("Node may be unable to resolve external DNS records") + } + } +} +func tryRegistry(r command.Runner) { + // Try an HTTPS connection to the image repository + proxy := os.Getenv("HTTPS_PROXY") + opts := []string{"-sS"} + if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") { + opts = append([]string{"-x", proxy}, opts...) + } + + repo := viper.GetString(imageRepository) + if repo == "" { + repo = images.DefaultKubernetesRepo + } + + opts = append(opts, fmt.Sprintf("https://%s/", repo)) + if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil { + glog.Warningf("%s failed: %v", rr.Args, err) + out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo}) + } +} + +// prepareNone prepares the user and host for the joy of the "none" driver +func prepareNone() { + out.T(out.StartingNone, "Configuring local host environment ...") + if viper.GetBool(config.WantNoneDriverWarning) { + out.T(out.Empty, "") + out.WarningT("The 'none' driver provides limited isolation and may reduce system security and reliability.") + out.WarningT("For more information, see:") + out.T(out.URL, "https://minikube.sigs.k8s.io/docs/reference/drivers/none/") + out.T(out.Empty, "") + } + + if os.Getenv("CHANGE_MINIKUBE_NONE_USER") == "" { + home := os.Getenv("HOME") + out.WarningT("kubectl and minikube configuration will be stored in {{.home_folder}}", out.V{"home_folder": home}) + out.WarningT("To use kubectl or minikube commands as your own user, you may need to relocate them. 
For example, to overwrite your own settings, run:") + + out.T(out.Empty, "") + out.T(out.Command, "sudo mv {{.home_folder}}/.kube {{.home_folder}}/.minikube $HOME", out.V{"home_folder": home}) + out.T(out.Command, "sudo chown -R $USER $HOME/.kube $HOME/.minikube") + out.T(out.Empty, "") + + out.T(out.Tip, "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true") + } + + if err := util.MaybeChownDirRecursiveToMinikubeUser(localpath.MiniPath()); err != nil { + exit.WithCodeT(exit.Permissions, "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}", out.V{"minikube_dir_path": localpath.MiniPath(), "error": err}) } } From add1c8f953fd6e45f8d937a54c1be198b565b92b Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 18 Mar 2020 15:10:34 -0700 Subject: [PATCH 099/668] missing file --- pkg/minikube/node/cache.go | 168 +++++++++++++++++++++++++++++++++++++ 1 file changed, 168 insertions(+) create mode 100644 pkg/minikube/node/cache.go diff --git a/pkg/minikube/node/cache.go b/pkg/minikube/node/cache.go new file mode 100644 index 0000000000..f1b3ac8f32 --- /dev/null +++ b/pkg/minikube/node/cache.go @@ -0,0 +1,168 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package node
+
+import (
+	"os"
+	"runtime"
+
+	"github.com/golang/glog"
+	"github.com/spf13/viper"
+	"golang.org/x/sync/errgroup"
+	cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
+	"k8s.io/minikube/pkg/drivers/kic"
+	"k8s.io/minikube/pkg/minikube/config"
+	"k8s.io/minikube/pkg/minikube/constants"
+	"k8s.io/minikube/pkg/minikube/download"
+	"k8s.io/minikube/pkg/minikube/exit"
+	"k8s.io/minikube/pkg/minikube/image"
+	"k8s.io/minikube/pkg/minikube/localpath"
+	"k8s.io/minikube/pkg/minikube/machine"
+	"k8s.io/minikube/pkg/minikube/out"
+)
+
+const (
+	cacheImages         = "cache-images"
+	cacheImageConfigKey = "cache"
+)
+
+// beginCacheKubernetesImages caches images required for kubernetes version in the background
+func beginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVersion string, cRuntime string) {
+	if download.PreloadExists(k8sVersion, cRuntime) {
+		glog.Info("Caching tarball of preloaded images")
+		err := download.Preload(k8sVersion, cRuntime)
+		if err == nil {
+			glog.Infof("Finished downloading the preloaded tar for %s on %s", k8sVersion, cRuntime)
+			return // don't cache individual images if preload is successful.
+		}
+		glog.Warningf("Error downloading preloaded artifacts will continue without preload: %v", err)
+	}
+
+	if !viper.GetBool(cacheImages) {
+		return
+	}
+
+	g.Go(func() error {
+		return machine.CacheImagesForBootstrapper(imageRepository, k8sVersion, viper.GetString(cmdcfg.Bootstrapper))
+	})
+}
+
+// handleDownloadOnly caches appropriate binaries and images
+func handleDownloadOnly(cacheGroup, kicGroup *errgroup.Group, k8sVersion string) {
+	// If --download-only, complete the remaining downloads and exit.
+ if !viper.GetBool("download-only") { + return + } + if err := doCacheBinaries(k8sVersion); err != nil { + exit.WithError("Failed to cache binaries", err) + } + if _, err := CacheKubectlBinary(k8sVersion); err != nil { + exit.WithError("Failed to cache kubectl", err) + } + waitCacheRequiredImages(cacheGroup) + waitDownloadKicArtifacts(kicGroup) + if err := saveImagesToTarFromConfig(); err != nil { + exit.WithError("Failed to cache images to tar", err) + } + out.T(out.Check, "Download complete!") + os.Exit(0) + +} + +// CacheKubectlBinary caches the kubectl binary +func CacheKubectlBinary(k8sVerison string) (string, error) { + binary := "kubectl" + if runtime.GOOS == "windows" { + binary = "kubectl.exe" + } + + return download.Binary(binary, k8sVerison, runtime.GOOS, runtime.GOARCH) +} + +// doCacheBinaries caches Kubernetes binaries in the foreground +func doCacheBinaries(k8sVersion string) error { + return machine.CacheBinariesForBootstrapper(k8sVersion, viper.GetString(cmdcfg.Bootstrapper)) +} + +// BeginDownloadKicArtifacts downloads the kic image + preload tarball, returns true if preload is available +func beginDownloadKicArtifacts(g *errgroup.Group) { + glog.Info("Beginning downloading kic artifacts") + g.Go(func() error { + glog.Infof("Downloading %s to local daemon", kic.BaseImage) + return image.WriteImageToDaemon(kic.BaseImage) + }) +} + +// WaitDownloadKicArtifacts blocks until the required artifacts for KIC are downloaded. +func waitDownloadKicArtifacts(g *errgroup.Group) { + if err := g.Wait(); err != nil { + glog.Errorln("Error downloading kic artifacts: ", err) + return + } + glog.Info("Successfully downloaded all kic artifacts") +} + +// WaitCacheRequiredImages blocks until the required images are all cached. 
+func waitCacheRequiredImages(g *errgroup.Group) { + if !viper.GetBool(cacheImages) { + return + } + if err := g.Wait(); err != nil { + glog.Errorln("Error caching images: ", err) + } +} + +// saveImagesToTarFromConfig saves images to tar in cache which specified in config file. +// currently only used by download-only option +func saveImagesToTarFromConfig() error { + images, err := imagesInConfigFile() + if err != nil { + return err + } + if len(images) == 0 { + return nil + } + return image.SaveToDir(images, constants.ImageCacheDir) +} + +// CacheAndLoadImagesInConfig loads the images currently in the config file +// called by 'start' and 'cache reload' commands. +func CacheAndLoadImagesInConfig() error { + images, err := imagesInConfigFile() + if err != nil { + return err + } + if len(images) == 0 { + return nil + } + return machine.CacheAndLoadImages(images) +} + +func imagesInConfigFile() ([]string, error) { + configFile, err := config.ReadConfig(localpath.ConfigFile()) + if err != nil { + return nil, err + } + if values, ok := configFile[cacheImageConfigKey]; ok { + var images []string + for key := range values.(map[string]interface{}) { + images = append(images, key) + } + return images, nil + } + return []string{}, nil +} From 5d3816f64043ef36dc4ae483230c592189db7c52 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 18 Mar 2020 15:47:46 -0700 Subject: [PATCH 100/668] Allow problems to be announced at all stages, add more regexes --- .../bootstrapper/bsutil/kverify/kverify.go | 40 ++++++++++++++----- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 23 ++++++----- pkg/minikube/logs/logs.go | 28 ++++++++++++- pkg/minikube/logs/logs_test.go | 7 ++++ 4 files changed, 76 insertions(+), 22 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go index 54f17565e1..e420307c65 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go +++ 
b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go @@ -40,15 +40,22 @@ import ( "k8s.io/minikube/pkg/minikube/logs" ) -// APIServerProcess waits for api server to be healthy returns error if it doesn't -func APIServerProcess(runner command.Runner, start time.Time, timeout time.Duration) error { +// WaitForAPIServerProcess waits for api server to be healthy returns error if it doesn't +func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr command.Runner, start time.Time, timeout time.Duration) error { glog.Infof("waiting for apiserver process to appear ...") + minLogTime := kconst.APICallRetryInterval * 10 + err := wait.PollImmediate(time.Millisecond*500, timeout, func() (bool, error) { if time.Since(start) > timeout { return false, fmt.Errorf("cluster wait timed out during process check") } - if _, ierr := apiServerPID(runner); ierr != nil { + if time.Since(start) > minLogTime { + announceProblems(r, bs, cr) + time.Sleep(kconst.APICallRetryInterval * 5) + } + + if _, ierr := apiServerPID(cr); ierr != nil { return false, nil } return true, nil @@ -70,14 +77,21 @@ func apiServerPID(cr command.Runner) (int, error) { return strconv.Atoi(s) } -// SystemPods verifies essential pods for running kurnetes is running -func SystemPods(client *kubernetes.Clientset, start time.Time, timeout time.Duration) error { +// WaitForSystemPods verifies essential pods for running kurnetes is running +func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr command.Runner, client *kubernetes.Clientset, start time.Time, timeout time.Duration) error { glog.Info("waiting for kube-system pods to appear ...") pStart := time.Now() + minLogTime := kconst.APICallRetryInterval * 10 + podList := func() (bool, error) { if time.Since(start) > timeout { return false, fmt.Errorf("cluster wait timed out during pod check") } + if time.Since(start) > minLogTime { + announceProblems(r, bs, cr) + time.Sleep(kconst.APICallRetryInterval * 5) + } + // Wait for 
any system pod, as waiting for apiserver may block until etcd pods, err := client.CoreV1().Pods("kube-system").List(meta.ListOptions{}) if err != nil { @@ -108,13 +122,8 @@ func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, c return false, fmt.Errorf("cluster wait timed out during healthz check") } - // We're probably not going to recover, so show problems and slow polling if time.Since(start) > minLogTime { - problems := logs.FindProblems(r, bs, cr) - if len(problems) > 0 { - logs.OutputProblems(problems, 5) - time.Sleep(kconst.APICallRetryInterval * 15) - } + announceProblems(r, bs, cr) time.Sleep(kconst.APICallRetryInterval * 5) } @@ -136,6 +145,15 @@ func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, c return nil } +// announceProblems checks for problems, and slows polling down if any are found +func announceProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr command.Runner) { + problems := logs.FindProblems(r, bs, cr) + if len(problems) > 0 { + logs.OutputProblems(problems, 5) + time.Sleep(kconst.APICallRetryInterval * 15) + } +} + // APIServerStatus returns apiserver status in libmachine style state.State func APIServerStatus(cr command.Runner, ip net.IP, port int) (state.State, error) { glog.Infof("Checking apiserver status ...") diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index d8be5c4d52..39618cf8a2 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -260,7 +260,12 @@ func (k *Bootstrapper) WaitForCluster(cfg config.ClusterConfig, timeout time.Dur if err != nil { return err } - if err := kverify.APIServerProcess(k.c, start, timeout); err != nil { + cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c}) + if err != nil { + return err + } + + if err := kverify.WaitForAPIServerProcess(cr, k, k.c, start, timeout); err != nil { 
return err } @@ -274,11 +279,6 @@ func (k *Bootstrapper) WaitForCluster(cfg config.ClusterConfig, timeout time.Dur } } - cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c}) - if err != nil { - return err - } - if err := kverify.WaitForHealthyAPIServer(cr, k, k.c, start, ip, port, timeout); err != nil { return err } @@ -288,7 +288,7 @@ func (k *Bootstrapper) WaitForCluster(cfg config.ClusterConfig, timeout time.Dur return errors.Wrap(err, "get k8s client") } - if err := kverify.SystemPods(c, start, timeout); err != nil { + if err := kverify.WaitForSystemPods(cr, k, k.c, c, start, timeout); err != nil { return errors.Wrap(err, "waiting for system pods") } return nil @@ -335,8 +335,13 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { } } + cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c}) + if err != nil { + return err + } + // We must ensure that the apiserver is healthy before proceeding - if err := kverify.APIServerProcess(k.c, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { + if err := kverify.WaitForAPIServerProcess(cr, k, k.c, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { return errors.Wrap(err, "apiserver healthz") } @@ -355,7 +360,7 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { return errors.Wrap(err, "getting k8s client") } - if err := kverify.SystemPods(client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { + if err := kverify.WaitForSystemPods(cr, k, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { return errors.Wrap(err, "system pods") } diff --git a/pkg/minikube/logs/logs.go b/pkg/minikube/logs/logs.go index fb833bca2b..2ad54e3ef9 100644 --- a/pkg/minikube/logs/logs.go +++ b/pkg/minikube/logs/logs.go @@ -35,8 +35,31 @@ import ( "k8s.io/minikube/pkg/minikube/out" ) -// rootCauseRe is a regular expression that matches known failure root causes 
-var rootCauseRe = regexp.MustCompile(`^error: |eviction manager: pods.* evicted|unknown flag: --|forbidden.*no providers available|eviction manager:.*evicted|tls: bad certificate|kubelet.*no API client|kubelet.*No api server|STDIN.*127.0.0.1:8080|failed to create listener|address already in use|unable to evict any pods|eviction manager: unexpected error|Resetting AnonymousAuth to false`) +// rootCauses are regular expressions that match known failures +var rootCauses = []string{ + `^error: `, + `eviction manager: pods.* evicted`, + `unknown flag: --`, + `forbidden.*no providers available`, + `eviction manager:.*evicted`, + `tls: bad certificate`, + `kubelet.*no API client`, + `kubelet.*No api server`, + `STDIN.*127.0.0.1:8080`, + `failed to create listener`, + `address already in use`, + `unable to evict any pods`, + `eviction manager: unexpected error`, + `Resetting AnonymousAuth to false`, + `CrashLoopBackOff`, + `Unable to register node.*forbidden`, + `Failed to initialize CSINodeInfo.*forbidden`, + `Failed to admit pod`, + `failed to "StartContainer"`, +} + +// rootCauseRe combines rootCauses into a single regex +var rootCauseRe = regexp.MustCompile(strings.Join(rootCauses, "|")) // ignoreCauseRe is a regular expression that matches spurious errors to not surface var ignoreCauseRe = regexp.MustCompile("error: no objects passed to apply") @@ -44,6 +67,7 @@ var ignoreCauseRe = regexp.MustCompile("error: no objects passed to apply") // importantPods are a list of pods to retrieve logs for, in addition to the bootstrapper logs. 
var importantPods = []string{ "kube-apiserver", + "etcd", "coredns", "kube-scheduler", "kube-proxy", diff --git a/pkg/minikube/logs/logs_test.go b/pkg/minikube/logs/logs_test.go index 8af8612e27..4d14ed857c 100644 --- a/pkg/minikube/logs/logs_test.go +++ b/pkg/minikube/logs/logs_test.go @@ -37,6 +37,13 @@ func TestIsProblem(t *testing.T) { {"bad-certificate #4251", true, "log.go:172] http: TLS handshake error from 127.0.0.1:49200: remote error: tls: bad certificate"}, {"ephemeral-eviction #5355", true, " eviction_manager.go:419] eviction manager: unexpected error when attempting to reduce ephemeral-storage pressure: wanted to free 9223372036854775807 bytes, but freed 0 bytes space with errors in image deletion"}, {"anonymous-auth", true, "AnonymousAuth is not allowed with the AlwaysAllow authorizer. Resetting AnonymousAuth to false. You should use a different authorizer"}, + {"disk-pressure #7073", true, "eviction_manager.go:159] Failed to admit pod kindnet-jpzzf_kube-system(b63b1ee0-0fc6-428f-8e67-e357464f579c) - node has conditions: [DiskPressure]"}, + {"csi timeout", true, `Failed to initialize CSINodeInfo: error updating CSINode annotation: timed out waiting for the condition; caused by: csinodes.storage.k8s.io "m01" is forbidden: User "system:node:m01" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope`}, + {"node registration permissions", true, `Unable to register node "m01" with API server: nodes is forbidden: User "system:node:m01" cannot create resource "nodes" in API group "" at the cluster scope`}, + {"regular kubelet refused", false, `kubelet_node_status.go:92] Unable to register node "m01" with API server: Post https://localhost:8443/api/v1/nodes: dial tcp 127.0.0.1:8443: connect: connection refused`}, + {"regular csi refused", false, `Failed to initialize CSINodeInfo: error updating CSINode annotation: timed out waiting for the condition; caused by: Get https://localhost:8443/apis/storage.k8s.io/v1/csinodes/m01: dial 
tcp 127.0.0.1:8443: connect: connection refused`}, + {"apiserver crashloop", true, `pod_workers.go:191] Error syncing pod 9f8ee739bd14e8733f807eb2be99768f ("kube-apiserver-m01_kube-system(9f8ee739bd14e8733f807eb2be99768f)"), skipping: failed to "StartContainer" for "kube-apiserver" with CrashLoopBackOff: "back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-m01_kube-system(9f8ee739bd14e8733f807eb2be99768f)`}, + {"kubelet node timeout", false, `failed to ensure node lease exists, will retry in 6.4s, error: Get https://localhost:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/m01?timeout=10s: dial tcp 127.0.0.1:8443: connect: connection refused`}, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { From 9420a941f9c23bf79d375f67273e131fcd3841eb Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 18 Mar 2020 16:22:23 -0700 Subject: [PATCH 101/668] Remove false positives --- cmd/minikube/cmd/logs.go | 2 +- pkg/minikube/logs/logs.go | 2 ++ pkg/minikube/logs/logs_test.go | 5 +++++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/cmd/minikube/cmd/logs.go b/cmd/minikube/cmd/logs.go index cf36b52cb2..a1856d7bc4 100644 --- a/cmd/minikube/cmd/logs.go +++ b/cmd/minikube/cmd/logs.go @@ -32,7 +32,7 @@ import ( const ( // number of problems per log to output - numberOfProblems = 5 + numberOfProblems = 10 ) var ( diff --git a/pkg/minikube/logs/logs.go b/pkg/minikube/logs/logs.go index 2ad54e3ef9..e4ec4a9047 100644 --- a/pkg/minikube/logs/logs.go +++ b/pkg/minikube/logs/logs.go @@ -56,6 +56,8 @@ var rootCauses = []string{ `Failed to initialize CSINodeInfo.*forbidden`, `Failed to admit pod`, `failed to "StartContainer"`, + `kubelet.*forbidden.*cannot \w+ resource`, + `leases.*forbidden.*cannot \w+ resource`, } // rootCauseRe combines rootCauses into a single regex diff --git a/pkg/minikube/logs/logs_test.go b/pkg/minikube/logs/logs_test.go index 4d14ed857c..918ba60cfb 100644 --- a/pkg/minikube/logs/logs_test.go 
+++ b/pkg/minikube/logs/logs_test.go @@ -44,6 +44,11 @@ func TestIsProblem(t *testing.T) { {"regular csi refused", false, `Failed to initialize CSINodeInfo: error updating CSINode annotation: timed out waiting for the condition; caused by: Get https://localhost:8443/apis/storage.k8s.io/v1/csinodes/m01: dial tcp 127.0.0.1:8443: connect: connection refused`}, {"apiserver crashloop", true, `pod_workers.go:191] Error syncing pod 9f8ee739bd14e8733f807eb2be99768f ("kube-apiserver-m01_kube-system(9f8ee739bd14e8733f807eb2be99768f)"), skipping: failed to "StartContainer" for "kube-apiserver" with CrashLoopBackOff: "back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-m01_kube-system(9f8ee739bd14e8733f807eb2be99768f)`}, {"kubelet node timeout", false, `failed to ensure node lease exists, will retry in 6.4s, error: Get https://localhost:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/m01?timeout=10s: dial tcp 127.0.0.1:8443: connect: connection refused`}, + {"rbac misconfiguration", true, `leases.coordination.k8s.io "m01" is forbidden: User "system:node:m01" cannot get resource "leases" in API group "coordination.k8s.io" in the namespace "kube-node-lease"`}, + {"regular controller init", false, `error retrieving resource lock kube-system/kube-controller-manager: endpoints "kube-controller-manager" is forbidden: User "system:kube-controller-manager" cannot get resource "endpoints" in API group "" in the namespace "kube-system"`}, + {"regular scheduler services init", false, ` k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope`}, + {"regular scheduler nodes init", false, `k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope`}, + {"kubelet rbac fail", true, 
`k8s.io/kubernetes/pkg/kubelet/kubelet.go:526: Failed to list *v1.Node: nodes "m01" is forbidden: User "system:node:m01" cannot list resource "nodes" in API group "" at the cluster scope`}, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { From 68f973c193111bb79cbf79cb8578333ab0c77d71 Mon Sep 17 00:00:00 2001 From: Prasad Katti Date: Wed, 18 Mar 2020 16:26:42 -0700 Subject: [PATCH 102/668] Add describe nodes to minikube logs --- pkg/minikube/logs/logs.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pkg/minikube/logs/logs.go b/pkg/minikube/logs/logs.go index ef26e9d7d7..350eacc8f7 100644 --- a/pkg/minikube/logs/logs.go +++ b/pkg/minikube/logs/logs.go @@ -23,16 +23,20 @@ import ( "fmt" "os" "os/exec" + "path" "regexp" "sort" "strings" "github.com/golang/glog" "github.com/pkg/errors" + "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/bootstrapper" "k8s.io/minikube/pkg/minikube/command" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/vmpath" ) // rootCauseRe is a regular expression that matches known failure root causes @@ -186,5 +190,15 @@ func logCommands(r cruntime.Manager, bs bootstrapper.Bootstrapper, length int, f } cmds[r.Name()] = r.SystemLogCmd(length) cmds["container status"] = cruntime.ContainerStatusCommand() + + cfg, err := config.Load(viper.GetString(config.ProfileName)) + if err != nil && !config.IsNotExist(err) { + out.ErrLn("Error loading profile config: %v", err) + } + + cmds["describe nodes"] = fmt.Sprintf("sudo %s describe node -A --kubeconfig=%s", + path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl"), + path.Join(vmpath.GuestPersistentDir, "kubeconfig")) + return cmds } From efac79eb26e060f1daf68c4002fcb08ddd41e457 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 18 Mar 2020 17:00:29 -0700 Subject: [PATCH 103/668] account for hyphens in profile name --- 
pkg/minikube/driver/driver.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index 170b99e71a..b7fd18b0c2 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -240,7 +240,8 @@ func MachineName(cc config.ClusterConfig, n config.Node) string { // ClusterNameFromMachine retrieves the cluster name embedded in the machine name func ClusterNameFromMachine(name string) string { if strings.Contains(name, "-") { - return strings.Split(name, "-")[0] + a := strings.Split(name, "-") + return strings.Join(a[0:len(a)-2], "-") } return name } From fa97a5bf0d0f7a0b4a67512b490a8caf418d9fdd Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 18 Mar 2020 18:59:15 -0700 Subject: [PATCH 104/668] fix machine name creation --- pkg/minikube/driver/driver.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index b7fd18b0c2..ebee96ffca 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -234,14 +234,13 @@ func MachineName(cc config.ClusterConfig, n config.Node) string { if len(cc.Nodes) == 1 || n.ControlPlane { return cc.Name } - return fmt.Sprintf("%s-%s", cc.Name, n.Name) + return fmt.Sprintf("%s---%s", cc.Name, n.Name) } // ClusterNameFromMachine retrieves the cluster name embedded in the machine name func ClusterNameFromMachine(name string) string { - if strings.Contains(name, "-") { - a := strings.Split(name, "-") - return strings.Join(a[0:len(a)-2], "-") + if strings.Contains(name, "---") { + return strings.Split(name, "---")[0] } return name } From f9b38dc04ea6b339b1b783a6a801b773c010b505 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 18 Mar 2020 21:28:03 -0700 Subject: [PATCH 105/668] clean up status output and have multinode survive cluster restarts --- cmd/minikube/cmd/start.go | 3 +++ cmd/minikube/cmd/status.go | 21 ++++++++++++--------- 
cmd/minikube/cmd/status_test.go | 6 +++--- pkg/minikube/driver/driver.go | 6 +++--- pkg/provision/provision.go | 2 +- 5 files changed, 22 insertions(+), 16 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 9ab8a0e730..44e01cf7a9 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -350,6 +350,9 @@ func runStart(cmd *cobra.Command, args []string) { kubeconfig := node.Start(cc, n, existingAddons, true) numNodes := viper.GetInt(nodes) + if numNodes == 1 && existing != nil { + numNodes = len(existing.Nodes) + } if numNodes > 1 { if driver.BareMetal(driverName) { out.T(out.Meh, "The none driver is not compatible with multi-node clusters.") diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index a153a7b932..44b96bc8e2 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -67,6 +67,7 @@ type Status struct { Kubelet string APIServer string Kubeconfig string + Worker bool } const ( @@ -78,6 +79,12 @@ host: {{.Host}} kubelet: {{.Kubelet}} apiserver: {{.APIServer}} kubeconfig: {{.Kubeconfig}} + +` + workerStatusFormat = `{{.Name}} +host: {{.Host}} +kubelet: {{.Kubelet}} + ` ) @@ -153,15 +160,7 @@ func exitCode(st *Status) int { func status(api libmachine.API, name string, controlPlane bool) (*Status, error) { - var profile, node string - - if strings.Contains(name, "-") { - profile = strings.Split(name, "-")[0] - node = strings.Split(name, "-")[1] - } else { - profile = name - node = name - } + profile, node := driver.ClusterNameFromMachine(name) st := &Status{ Name: node, @@ -169,6 +168,7 @@ func status(api libmachine.API, name string, controlPlane bool) (*Status, error) APIServer: Nonexistent, Kubelet: Nonexistent, Kubeconfig: Nonexistent, + Worker: !controlPlane, } hs, err := machine.Status(api, name) @@ -265,6 +265,9 @@ For the list accessible variables for the template, see the struct values here: func statusText(st *Status, w io.Writer) error { tmpl, err := 
template.New("status").Parse(statusFormat) + if st.Worker && statusFormat == defaultStatusFormat { + tmpl, err = template.New("worker-status").Parse(workerStatusFormat) + } if err != nil { return err } diff --git a/cmd/minikube/cmd/status_test.go b/cmd/minikube/cmd/status_test.go index 44f4133dfd..b11e549a6d 100644 --- a/cmd/minikube/cmd/status_test.go +++ b/cmd/minikube/cmd/status_test.go @@ -52,17 +52,17 @@ func TestStatusText(t *testing.T) { { name: "ok", state: &Status{Name: "minikube", Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: Configured}, - want: "minikube\nhost: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n", + want: "minikube\nhost: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n\n", }, { name: "paused", state: &Status{Name: "minikube", Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured}, - want: "minikube\nhost: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n", + want: "minikube\nhost: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n\n", }, { name: "down", state: &Status{Name: "minikube", Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured}, - want: "minikube\nhost: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n", + want: "minikube\nhost: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n", }, } for _, tc := range tests { diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index ebee96ffca..b6106474d8 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -238,9 +238,9 @@ func MachineName(cc config.ClusterConfig, n config.Node) string { } 
// ClusterNameFromMachine retrieves the cluster name embedded in the machine name -func ClusterNameFromMachine(name string) string { +func ClusterNameFromMachine(name string) (string, string) { if strings.Contains(name, "---") { - return strings.Split(name, "---")[0] + return strings.Split(name, "---")[0], strings.Split(name, "---")[1] } - return name + return name, name } diff --git a/pkg/provision/provision.go b/pkg/provision/provision.go index fd84405266..acad46c3ac 100644 --- a/pkg/provision/provision.go +++ b/pkg/provision/provision.go @@ -196,7 +196,7 @@ func setRemoteAuthOptions(p provision.Provisioner) auth.Options { } func setContainerRuntimeOptions(name string, p miniProvisioner) error { - cluster := driver.ClusterNameFromMachine(name) + cluster, _ := driver.ClusterNameFromMachine(name) c, err := config.Load(cluster) if err != nil { return errors.Wrap(err, "getting cluster config") From b5e8c20e0f4fb96596a97b0ba888b077c06bdfb5 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Thu, 19 Mar 2020 10:49:43 -0700 Subject: [PATCH 106/668] add in daily triage section --- site/content/en/docs/Contributing/triage.md | 65 ++++++++++++++++++++- 1 file changed, 64 insertions(+), 1 deletion(-) diff --git a/site/content/en/docs/Contributing/triage.md b/site/content/en/docs/Contributing/triage.md index 7ba7d66287..efb76ef1ad 100644 --- a/site/content/en/docs/Contributing/triage.md +++ b/site/content/en/docs/Contributing/triage.md @@ -19,9 +19,72 @@ If you're interested in helping out with minikube triage, this doc covers the ba Additionally, if you'd be interested in participating in our weekly triage meeting, please fill out this [form](https://forms.gle/vNtWZSWXqeYaaNbU9) to express interest. Thank you! -# Labeling Issues +# Daily Triage +Daily triage has two goals: +1. Responsiveness for new issues +1. 
Responsiveness when explicitly requested information was provided +The list of outstanding items are at https://teaparty-tts3vkcpgq-uc.a.run.app/s/daily-triage - it covers: + +1. Issues without a `kind/` or `triage/` label +1. Issues without a `priority/` label +1. `triage/needs-information` issues which the user has followed up on + +## Categorization + +The most important level of categorizing the issue is defining what type it is. +We typically want at least one of the following labels on every issue, and some issues may fall into multiple categories: + +- `triage/support` - The default for most incoming issues +- `kind/bug` - When it’s a bug or we aren’t delivering the best user experience + +Other possibilities: +- `kind/feature`- Identify new feature requests +- `kind/flake` - Used for flaky integration or unit tests +- `kind/cleanup` - Cleaning up/refactoring the codebase +- `kind/documentation` - Updates or additions to minikube documentation +- `kind/ux` - Issues that involve improving user experience +- `kind/security` - When there's a security vulnerability in minikube + +If the issue is specific to an operating system, hypervisor, container, addon, or Kubernetes component: + +os/ - When the issue appears specific to an operating system + - `os/linux` + - `os/macos` + - `os/windows` +co/ - When the issue appears specific to a driver + - `co/hyperkit` + - `co/hyperv` + - `co/kvm2` + - `co/none-driver` + - `co/docker-driver` + - `co/virtualbox` +co/ - When the issue appears specific to a k8s component + - `co/apiserver` + - `co/etcd` + - `co/coredns` + - `co/dashboard` + - `co/kube-proxy` + - `co/kubeadm` + - `co/kubelet` + - `co/kubeconfig` + + +Other useful tags: + +Did an **Event** occur that we can dedup similar issues against? 
+- `ev/CrashLoopBackoff` +- `ev/Panic` +- `ev/Pending` +- `ev/kubeadm-exit-1` +Suspected **Root cause**: +- `cause/vm-environment` +- `cause/invalid-kubelet-options` + +**Help wanted?** +`Good First Issue` - bug has a proposed solution, can be implemented w/o further discussion. +`Help wanted` - if the bug could use help from a contributor From eaa54a28d10ee666357ff66f8d5b497e52e828a6 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Thu, 19 Mar 2020 10:51:10 -0700 Subject: [PATCH 107/668] Add prioritization --- site/content/en/docs/Contributing/triage.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/site/content/en/docs/Contributing/triage.md b/site/content/en/docs/Contributing/triage.md index efb76ef1ad..b1ad150f1b 100644 --- a/site/content/en/docs/Contributing/triage.md +++ b/site/content/en/docs/Contributing/triage.md @@ -87,6 +87,19 @@ Suspected **Root cause**: `Help wanted` - if the bug could use help from a contributor +## Prioritization +If the issue is not `triage/support`, it needs a [priority label](https://github.com/kubernetes/community/blob/master/contributors/guide/issue-triage.md#define-priority): + +`priority/critical-urgent` - someones top priority ASAP, such as security issue, user-visible bug, or build breakage. Rarely used. + +`priority/important-soon`: in time for the next two releases (8 weeks) + +`priority/important-longterm`: 2-4 releases from now + +`priority/backlog`: agreed that this would be good to have, but no one is available at the moment. Consider tagging as `help wanted` + +`priority/awaiting-more-evidence`: may be more useful, but there is not yet enough support. 
+ # Responding to Issues From 9a76c2b33c06895081dd80c2cf8341f4890b2587 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Thu, 19 Mar 2020 11:01:24 -0700 Subject: [PATCH 108/668] add weekly and post-release triage --- site/content/en/docs/Contributing/triage.md | 71 +++++++++++++++++++-- 1 file changed, 65 insertions(+), 6 deletions(-) diff --git a/site/content/en/docs/Contributing/triage.md b/site/content/en/docs/Contributing/triage.md index b1ad150f1b..ab0dec23c9 100644 --- a/site/content/en/docs/Contributing/triage.md +++ b/site/content/en/docs/Contributing/triage.md @@ -101,11 +101,59 @@ If the issue is not `triage/support`, it needs a [priority label](https://github `priority/awaiting-more-evidence`: may be more useful, but there is not yet enough support. -# Responding to Issues +# Weekly Triage -Many issues in the minikube repo fall into one of the following categories: -- Needs more information from the author to be actionable -- Duplicate Issue +Weekly triage has three goals: + +1. Catching up on unresponded issues +1. Reviewing and closing PR’s +1. Closing stale issues + +The list of outstanding items can be found at https://teaparty-tts3vkcpgq-uc.a.run.app/s/weekly-triage. + +## Post-Release Triage + +Post-release triage occurs after a major release (around every 4-6 weeks). +It focuses on: + +1. Closing bugs that have been resolved by the release +1. Reprioritizing bugs that have not been resolved by the release +1. Letting users know if we believe that there is still an issue + +This includes reviewing: + +1. Every issue that hasn’t been touched in the last 2 days +1. Re-evaluation of long-term issues +1. Re-evaluation of short-term issues + + +## Responding to Issues + +### Needs More Information +A sample response to ask for more info: + +``` +I don’t yet have a clear way to replicate this issue. Do you mind adding some additional details? 
Here is additional information that would be helpful: + +* The exact `minikube start` command line used +* The full output of the `minikube start` command, preferably with `--alsologtostderr -v=4` for extra logging. + * The full output of `minikube logs` +* The full output of `kubectl get po -A` + + +Thank you for sharing your experience! +``` + +Then: Label with `triage/needs-information`. + +### Issue might be resolved +If you think a release may have resolved an issue, ask the author to see if their issue has been resolved: + +``` +Could you please check to see if minikube addresses this issue? We've made some changes with how this is handled, and improved the minikube logs output to help us debug tricky cases like this. +``` + +Then: Label with `triage/needs-information`. ## Closing with Care @@ -121,22 +169,30 @@ In any of these situations, we aim to be kind when closing the issue, and offer Samples responses for these situations include: ### Issue has been addressed - +``` @author: I believe this issue is now addressed by minikube v1.4, as it . If you still see this issue with minikube v1.4 or higher, please reopen this issue by commenting with `/reopen` Thank you for reporting this issue! +``` + +Then: Close the issue ### Duplicate Issue - +``` This issue appears to be a duplicate of #X, do you mind if we move the conversation there? This way we can centralize the content relating to the issue. If you feel that this issue is not in fact a duplicate, please re-open it using `/reopen`. If you have additional information to share, please add it to the new issue. Thank you for reporting this! +``` + +Then: Label with `triage/duplicate` and close the issue. ### Lack of Information +If an issue hasn't been active for more than four weeks, and the author has been pinged at least once, then the issue can be closed. +``` Hey @author -- hopefully it's OK if I close this - there wasn't enough information to make it actionable, and some time has already passed. 
If you are able to provide additional details, you may reopen it at any point by adding /reopen to your comment. Here is additional information that may be helpful to us: @@ -147,3 +203,6 @@ Here is additional information that may be helpful to us: * The full output of `minikube logs` Thank you for sharing your experience! +``` + +Then: Close the issue. From 0e0ddbcfb37f9862b88af2ff7f59803a26ea401f Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Thu, 19 Mar 2020 11:16:34 -0700 Subject: [PATCH 109/668] Polish --- site/content/en/docs/Contributing/triage.md | 92 ++++++++++++--------- 1 file changed, 51 insertions(+), 41 deletions(-) diff --git a/site/content/en/docs/Contributing/triage.md b/site/content/en/docs/Contributing/triage.md index ab0dec23c9..5fb1e24ab7 100644 --- a/site/content/en/docs/Contributing/triage.md +++ b/site/content/en/docs/Contributing/triage.md @@ -13,7 +13,7 @@ A well organized repo allows maintainers to prioritize feature requests, fix bug Triage includes: - Labeling issues - Responding to issues -- Closing issues (under certain circumstances!) +- Closing issues If you're interested in helping out with minikube triage, this doc covers the basics of doing so. @@ -29,7 +29,7 @@ The list of outstanding items are at https://teaparty-tts3vkcpgq-uc.a.run.app/s/ 1. Issues without a `kind/` or `triage/` label 1. Issues without a `priority/` label -1. `triage/needs-information` issues which the user has followed up on +1. `triage/needs-information` issues which the user has followed up on, and now require a response. 
## Categorization @@ -49,18 +49,23 @@ Other possibilities: If the issue is specific to an operating system, hypervisor, container, addon, or Kubernetes component: -os/ - When the issue appears specific to an operating system +**os/[operating system]** - When the issue appears specific to an operating system + - `os/linux` - `os/macos` - `os/windows` -co/ - When the issue appears specific to a driver + +**co/[driver]** - When the issue appears specific to a driver + - `co/hyperkit` - `co/hyperv` - `co/kvm2` - `co/none-driver` - `co/docker-driver` - `co/virtualbox` -co/ - When the issue appears specific to a k8s component + +**co/[kubernetes component]** - When the issue appears specific to a k8s component + - `co/apiserver` - `co/etcd` - `co/coredns` @@ -74,16 +79,21 @@ co/ - When the issue appears specific to a k8s component Other useful tags: Did an **Event** occur that we can dedup similar issues against? + - `ev/CrashLoopBackoff` - `ev/Panic` - `ev/Pending` - `ev/kubeadm-exit-1` + Suspected **Root cause**: + - `cause/vm-environment` - `cause/invalid-kubelet-options` **Help wanted?** + `Good First Issue` - bug has a proposed solution, can be implemented w/o further discussion. + `Help wanted` - if the bug could use help from a contributor @@ -132,26 +142,27 @@ This includes reviewing: ### Needs More Information A sample response to ask for more info: -``` -I don’t yet have a clear way to replicate this issue. Do you mind adding some additional details? Here is additional information that would be helpful: +> I don’t yet have a clear way to replicate this issue. Do you mind adding some additional details. Here is additional information that would be helpful: +> +> \* The exact `minikube start` command line used +> +> \* The full output of the `minikube start` command, preferably with `--alsologtostderr -v=4` for extra logging. 
+> +> \* The full output of `minikube logs` +> +> \* The full output of `kubectl get po -A` +> +> +> +> Thank you for sharing your experience! -* The exact `minikube start` command line used -* The full output of the `minikube start` command, preferably with `--alsologtostderr -v=4` for extra logging. - * The full output of `minikube logs` -* The full output of `kubectl get po -A` - - -Thank you for sharing your experience! -``` Then: Label with `triage/needs-information`. ### Issue might be resolved If you think a release may have resolved an issue, ask the author to see if their issue has been resolved: -``` -Could you please check to see if minikube addresses this issue? We've made some changes with how this is handled, and improved the minikube logs output to help us debug tricky cases like this. -``` +> Could you please check to see if minikube addresses this issue? We've made some changes with how this is handled, and improved the minikube logs output to help us debug tricky cases like this. Then: Label with `triage/needs-information`. @@ -169,40 +180,39 @@ In any of these situations, we aim to be kind when closing the issue, and offer Samples responses for these situations include: ### Issue has been addressed -``` -@author: I believe this issue is now addressed by minikube v1.4, as it . If you still see this issue with minikube v1.4 or higher, please reopen this issue by commenting with `/reopen` -Thank you for reporting this issue! -``` +>@author: I believe this issue is now addressed by minikube v1.4, as it . If you still see this issue with minikube v1.4 or higher, please reopen this issue by commenting with `/reopen` +> +>Thank you for reporting this issue! Then: Close the issue ### Duplicate Issue -``` -This issue appears to be a duplicate of #X, do you mind if we move the conversation there? - -This way we can centralize the content relating to the issue. If you feel that this issue is not in fact a duplicate, please re-open it using `/reopen`. 
If you have additional information to share, please add it to the new issue. - -Thank you for reporting this! -``` +>This issue appears to be a duplicate of #X, do you mind if we move the conversation there? +> +>This way we can centralize the content relating to the issue. If you feel that this issue is not in fact a duplicate, please re-open it using `/reopen`. If you have additional information to share, please add it to the new issue. +> +>Thank you for reporting this! Then: Label with `triage/duplicate` and close the issue. ### Lack of Information If an issue hasn't been active for more than four weeks, and the author has been pinged at least once, then the issue can be closed. -``` -Hey @author -- hopefully it's OK if I close this - there wasn't enough information to make it actionable, and some time has already passed. If you are able to provide additional details, you may reopen it at any point by adding /reopen to your comment. - -Here is additional information that may be helpful to us: - -* Whether the issue occurs with the latest minikube release -* The exact `minikube start` command line used -* The full output of the `minikube start` command, preferably with `--alsologtostderr -v=3` for extra logging. - * The full output of `minikube logs` - -Thank you for sharing your experience! -``` +>Hey @author -- hopefully it's OK if I close this - there wasn't enough information to make it actionable, and some time has already passed. If you are able to provide additional details, you may reopen it at any point by adding /reopen to your comment. +> +>Here is additional information that may be helpful to us: +> +>\* Whether the issue occurs with the latest minikube release +> +>\* The exact `minikube start` command line used +> +>\* The full output of the `minikube start` command, preferably with `--alsologtostderr -v=3` for extra logging. +> +>\* The full output of `minikube logs` +> +> +>Thank you for sharing your experience! Then: Close the issue. 
From 39b5c534abb1ae3b6903345f15a1eafafb8c6dca Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Thu, 19 Mar 2020 12:08:34 -0700 Subject: [PATCH 110/668] Add warning if both vm-driver and driver are specified --- cmd/minikube/cmd/start.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 6227dfa16a..c9018c09d6 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -453,8 +453,18 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState { } // Default to looking at the new driver parameter - if viper.GetString("driver") != "" { - ds := driver.Status(viper.GetString("driver")) + if d := viper.GetString("driver"); d != "" { + if vmd := viper.GetString("vm-driver"); vmd != "" { + // Output a warning + warning := `Both driver={{.driver}} and vm-driver={{.vmd}} have been set. + + Since vm-driver is deprecated, minikube will default to driver={{.driver}}. + + If vm-driver is set in the global config, please run "minikube config unset vm-driver" to resolve this warning. + ` + out.T(out.Warning, warning, out.V{"driver": d, "vmd": vmd}) + } + ds := driver.Status(d) out.T(out.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()}) return ds } From ca6799194744339245f0bb99af5efdb6396b5529 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Thu, 19 Mar 2020 12:52:30 -0700 Subject: [PATCH 111/668] Only deleteHosts if running a VM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If minikube is started with the docker driver, all containers and volumes will be deleted in the call to `deleteProfileContainersAndVolumes`. 
If we then try to call `deleteHosts` on it, we get this warning: ``` ❗ Unable to get host status for "minikube": state: docker inspect -f {{.State.Status}} minikube: Error: No such object: minikube: exit status 1 ``` which will always happen because we've already deleted the container. --- cmd/minikube/cmd/delete.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/minikube/cmd/delete.go b/cmd/minikube/cmd/delete.go index 253f48128a..89f1eed4e6 100644 --- a/cmd/minikube/cmd/delete.go +++ b/cmd/minikube/cmd/delete.go @@ -239,7 +239,9 @@ func deleteProfile(profile *config.Profile) error { out.T(out.FailureType, "Failed to kill mount process: {{.error}}", out.V{"error": err}) } - deleteHosts(api, cc) + if driver.IsVM(cc.Driver) { + deleteHosts(api, cc) + } // In case DeleteHost didn't complete the job. deleteProfileDirectory(profile.Name) From dadf4bd35d0450f67b0207f7375a3baac471d37f Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 19 Mar 2020 14:04:40 -0700 Subject: [PATCH 112/668] Improve error message when docker-env is used with an incompatible runtime --- cmd/minikube/cmd/docker-env.go | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/cmd/minikube/cmd/docker-env.go b/cmd/minikube/cmd/docker-env.go index bf554f151c..138c2f0282 100644 --- a/cmd/minikube/cmd/docker-env.go +++ b/cmd/minikube/cmd/docker-env.go @@ -122,13 +122,18 @@ func isDockerActive(d drivers.Driver) (bool, error) { if err != nil { return false, err } - output, err := client.Output("sudo systemctl is-active docker") - if err != nil { - return false, err - } - // systemd returns error code on inactive + cmd := "sudo systemctl is-active docker" + + output, err := client.Output(cmd) s := strings.TrimSpace(output) - return err == nil && s == "active", nil + + if err != nil { + return false, fmt.Errorf("%s failed: %v\noutput: %q", cmd, err, s) + } + if s != "active" { + return false, fmt.Errorf("%s returned %q", cmd, s) + } + 
return true, nil } // dockerEnvCmd represents the docker-env command @@ -165,9 +170,15 @@ var dockerEnvCmd = &cobra.Command{ if hostSt != state.Running.String() { exit.WithCodeT(exit.Unavailable, `'{{.profile}}' is not running`, out.V{"profile": profile}) } + + if cc.KubernetesConfig.ContainerRuntime != "docker" { + exit.WithCodeT(exit.BadUsage, `The docker-env command is only compatible with the "docker" runtime, but this cluster was configured to use the "{{.runtime}}" runtime.`, + out.V{"runtime": cc.KubernetesConfig.ContainerRuntime}) + } + ok, err := isDockerActive(host.Driver) if err != nil { - exit.WithError("Error getting service status", err) + exit.WithError("Docker runtime check failed", err) } if !ok { From 8d48c68aece27b6bef66ce989bcb3fe8e3fb63b3 Mon Sep 17 00:00:00 2001 From: Prasad Katti Date: Thu, 19 Mar 2020 14:52:52 -0700 Subject: [PATCH 113/668] minor update based on review feedback --- pkg/minikube/logs/logs.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/logs/logs.go b/pkg/minikube/logs/logs.go index 350eacc8f7..a44a9b6073 100644 --- a/pkg/minikube/logs/logs.go +++ b/pkg/minikube/logs/logs.go @@ -192,7 +192,7 @@ func logCommands(r cruntime.Manager, bs bootstrapper.Bootstrapper, length int, f cmds["container status"] = cruntime.ContainerStatusCommand() cfg, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil && !config.IsNotExist(err) { + if err != nil { out.ErrLn("Error loading profile config: %v", err) } From e437e1c44090fc1141b2d28677a19f6e92ce7cda Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 19 Mar 2020 17:29:58 -0700 Subject: [PATCH 114/668] Use -f to remove up to 6s of of delay disabling containerd. 
--- pkg/minikube/cruntime/containerd.go | 2 +- pkg/minikube/cruntime/crio.go | 2 +- pkg/minikube/cruntime/docker.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/minikube/cruntime/containerd.go b/pkg/minikube/cruntime/containerd.go index d817dd7de0..67beb710a7 100644 --- a/pkg/minikube/cruntime/containerd.go +++ b/pkg/minikube/cruntime/containerd.go @@ -217,7 +217,7 @@ func (r *Containerd) Enable(disOthers bool) error { // Disable idempotently disables containerd on a host func (r *Containerd) Disable() error { - c := exec.Command("sudo", "systemctl", "stop", "containerd") + c := exec.Command("sudo", "systemctl", "stop", "-f", "containerd") if _, err := r.Runner.RunCmd(c); err != nil { return errors.Wrapf(err, "stop containerd") } diff --git a/pkg/minikube/cruntime/crio.go b/pkg/minikube/cruntime/crio.go index 804e4989ba..fff4a8c270 100644 --- a/pkg/minikube/cruntime/crio.go +++ b/pkg/minikube/cruntime/crio.go @@ -133,7 +133,7 @@ func (r *CRIO) Enable(disOthers bool) error { // Disable idempotently disables CRIO on a host func (r *CRIO) Disable() error { - if _, err := r.Runner.RunCmd(exec.Command("sudo", "systemctl", "stop", "crio")); err != nil { + if _, err := r.Runner.RunCmd(exec.Command("sudo", "systemctl", "stop", "-f", "crio")); err != nil { return errors.Wrapf(err, "disable crio.") } return nil diff --git a/pkg/minikube/cruntime/docker.go b/pkg/minikube/cruntime/docker.go index 8b203badec..8641092573 100644 --- a/pkg/minikube/cruntime/docker.go +++ b/pkg/minikube/cruntime/docker.go @@ -127,7 +127,7 @@ func (r *Docker) Restart() error { // Disable idempotently disables Docker on a host func (r *Docker) Disable() error { - c := exec.Command("sudo", "systemctl", "stop", "docker", "docker.socket") + c := exec.Command("sudo", "systemctl", "stop", "-f", "docker", "docker.socket") if _, err := r.Runner.RunCmd(c); err != nil { return errors.Wrap(err, "disable docker") } From 8fa96e29257fc8bf1fb116aacf482b336bd30948 Mon Sep 17 00:00:00 
2001 From: Thomas Stromberg Date: Thu, 19 Mar 2020 17:37:29 -0700 Subject: [PATCH 115/668] Make exec_runner and kic_runner logging behave like ssh_runner. --- pkg/minikube/command/exec_runner.go | 2 ++ pkg/minikube/command/kic_runner.go | 1 + 2 files changed, 3 insertions(+) diff --git a/pkg/minikube/command/exec_runner.go b/pkg/minikube/command/exec_runner.go index 5dc5b81a64..e25346c990 100644 --- a/pkg/minikube/command/exec_runner.go +++ b/pkg/minikube/command/exec_runner.go @@ -46,6 +46,8 @@ func NewExecRunner() Runner { // RunCmd implements the Command Runner interface to run a exec.Cmd object func (*execRunner) RunCmd(cmd *exec.Cmd) (*RunResult, error) { rr := &RunResult{Args: cmd.Args} + glog.Infof("Run: %v", rr.Command()) + var outb, errb io.Writer if cmd.Stdout == nil { var so bytes.Buffer diff --git a/pkg/minikube/command/kic_runner.go b/pkg/minikube/command/kic_runner.go index 2d00a15251..754fc64b18 100644 --- a/pkg/minikube/command/kic_runner.go +++ b/pkg/minikube/command/kic_runner.go @@ -88,6 +88,7 @@ func (k *kicRunner) RunCmd(cmd *exec.Cmd) (*RunResult, error) { oc.Env = cmd.Env rr := &RunResult{Args: cmd.Args} + glog.Infof("Run: %v", rr.Command()) var outb, errb io.Writer if oc.Stdout == nil { From ea6d6832e5653b7f4cc4bdc87afc5fda346422d1 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 19 Mar 2020 17:40:02 -0700 Subject: [PATCH 116/668] Only restart docker service if unit file has updated --- pkg/provision/buildroot.go | 47 +++++++++++-------------------------- pkg/provision/provision.go | 40 +++++++++++++++++++++++++------ pkg/provision/ubuntu.go | 48 +++++++++++--------------------------- 3 files changed, 60 insertions(+), 75 deletions(-) diff --git a/pkg/provision/buildroot.go b/pkg/provision/buildroot.go index 193478f215..913d8b34d0 100644 --- a/pkg/provision/buildroot.go +++ b/pkg/provision/buildroot.go @@ -19,18 +19,16 @@ package provision import ( "bytes" "fmt" - "path" "text/template" "time" 
"github.com/docker/machine/libmachine/auth" "github.com/docker/machine/libmachine/drivers" "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/log" "github.com/docker/machine/libmachine/provision" "github.com/docker/machine/libmachine/provision/pkgaction" - "github.com/docker/machine/libmachine/provision/serviceaction" "github.com/docker/machine/libmachine/swarm" + "github.com/golang/glog" "k8s.io/minikube/pkg/util/retry" ) @@ -42,7 +40,7 @@ type BuildrootProvisioner struct { // NewBuildrootProvisioner creates a new BuildrootProvisioner func NewBuildrootProvisioner(d drivers.Driver) provision.Provisioner { return &BuildrootProvisioner{ - provision.NewSystemdProvisioner("buildroot", d), + NewSystemdProvisioner("buildroot", d), } } @@ -65,7 +63,7 @@ func (p *BuildrootProvisioner) GenerateDockerOptions(dockerPort int) (*provision noPivot := true // Using pivot_root is not supported on fstype rootfs if fstype, err := rootFileSystemType(p); err == nil { - log.Debugf("root file system type: %s", fstype) + glog.Infof("root file system type: %s", fstype) noPivot = fstype == "rootfs" } @@ -79,7 +77,7 @@ Requires= minikube-automount.service docker.socket Type=notify ` if noPivot { - log.Warn("Using fundamentally insecure --no-pivot option") + glog.Warning("Using fundamentally insecure --no-pivot option") engineConfigTmpl += ` # DOCKER_RAMDISK disables pivot_root in Docker, using MS_MOVE instead. 
Environment=DOCKER_RAMDISK=yes @@ -140,30 +138,11 @@ WantedBy=multi-user.target return nil, err } - dockerCfg := &provision.DockerOptions{ + do := &provision.DockerOptions{ EngineOptions: engineCfg.String(), EngineOptionsPath: "/lib/systemd/system/docker.service", } - - log.Info("Setting Docker configuration on the remote daemon...") - - if _, err = p.SSHCommand(fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | sudo tee %s", path.Dir(dockerCfg.EngineOptionsPath), dockerCfg.EngineOptions, dockerCfg.EngineOptionsPath)); err != nil { - return nil, err - } - - // To make sure if there is a already-installed docker on the ISO to pick up the new systemd file - if err := p.Service("", serviceaction.DaemonReload); err != nil { - return nil, err - } - - if err := p.Service("docker", serviceaction.Enable); err != nil { - return nil, err - } - - if err := p.Service("docker", serviceaction.Restart); err != nil { - return nil, err - } - return dockerCfg, nil + return do, updateUnit(p, "docker", do.EngineOptions, do.EngineOptionsPath) } // Package installs a package @@ -177,18 +156,18 @@ func (p *BuildrootProvisioner) Provision(swarmOptions swarm.Options, authOptions p.AuthOptions = authOptions p.EngineOptions = engineOptions - log.Infof("provisioning hostname %q", p.Driver.GetMachineName()) + glog.Infof("provisioning hostname %q", p.Driver.GetMachineName()) if err := p.SetHostname(p.Driver.GetMachineName()); err != nil { return err } p.AuthOptions = setRemoteAuthOptions(p) - log.Debugf("set auth options %+v", p.AuthOptions) + glog.Infof("set auth options %+v", p.AuthOptions) - log.Debugf("setting up certificates") + glog.Infof("setting up certificates") configAuth := func() error { if err := configureAuth(p); err != nil { - log.Warnf("configureAuth failed: %v", err) + glog.Warningf("configureAuth failed: %v", err) return &retry.RetriableError{Err: err} } return nil @@ -196,13 +175,13 @@ func (p *BuildrootProvisioner) Provision(swarmOptions swarm.Options, authOptions err := 
retry.Expo(configAuth, time.Second, 2*time.Minute) if err != nil { - log.Debugf("Error configuring auth during provisioning %v", err) + glog.Infof("Error configuring auth during provisioning %v", err) return err } - log.Debugf("setting minikube options for container-runtime") + glog.Infof("setting minikube options for container-runtime") if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p); err != nil { - log.Debugf("Error setting container-runtime options during provisioning %v", err) + glog.Infof("Error setting container-runtime options during provisioning %v", err) return err } diff --git a/pkg/provision/provision.go b/pkg/provision/provision.go index 52fb131960..ceb4a18464 100644 --- a/pkg/provision/provision.go +++ b/pkg/provision/provision.go @@ -31,10 +31,10 @@ import ( "github.com/docker/machine/libmachine/cert" "github.com/docker/machine/libmachine/drivers" "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/log" "github.com/docker/machine/libmachine/mcnutils" "github.com/docker/machine/libmachine/provision" "github.com/docker/machine/libmachine/swarm" + "github.com/golang/glog" "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/command" @@ -66,11 +66,24 @@ func init() { } +// NewSystemdProvisioner is our fork of the same name in the upstream provision library, without the packages +func NewSystemdProvisioner(osReleaseID string, d drivers.Driver) provision.SystemdProvisioner { + return provision.SystemdProvisioner{ + provision.GenericProvisioner{ + SSHCommander: provision.GenericSSHCommander{Driver: d}, + DockerOptionsDir: "/etc/docker", + DaemonOptionsFile: "/etc/systemd/system/docker.service.d/10-machine.conf", + OsReleaseID: osReleaseID, + Driver: d, + }, + } +} + func configureAuth(p miniProvisioner) error { - log.Infof("configureAuth start") + glog.Infof("configureAuth start") start := time.Now() defer func() { - log.Infof("configureAuth took %s", 
time.Since(start)) + glog.Infof("configureAuth took %s", time.Since(start)) }() driver := p.GetDriver() @@ -90,7 +103,7 @@ func configureAuth(p miniProvisioner) error { // The Host IP is always added to the certificate's SANs list hosts := append(authOptions.ServerCertSANs, ip, "localhost", "127.0.0.1") - log.Debugf("generating server cert: %s ca-key=%s private-key=%s org=%s san=%s", + glog.Infof("generating server cert: %s ca-key=%s private-key=%s org=%s san=%s", authOptions.ServerCertPath, authOptions.CaCertPath, authOptions.CaPrivateKeyPath, @@ -116,11 +129,11 @@ func configureAuth(p miniProvisioner) error { } func copyHostCerts(authOptions auth.Options) error { - log.Infof("copyHostCerts") + glog.Infof("copyHostCerts") err := os.MkdirAll(authOptions.StorePath, 0700) if err != nil { - log.Errorf("mkdir failed: %v", err) + glog.Errorf("mkdir failed: %v", err) } hostCerts := map[string]string{ @@ -144,7 +157,7 @@ func copyHostCerts(authOptions auth.Options) error { } func copyRemoteCerts(authOptions auth.Options, driver drivers.Driver) error { - log.Infof("copyRemoteCerts") + glog.Infof("copyRemoteCerts") remoteCerts := map[string]string{ authOptions.CaCertPath: authOptions.CaCertRemotePath, @@ -276,3 +289,16 @@ func concatStrings(src []string, prefix string, postfix string) []string { } return ret } + +// updateUnit efficiently updates a systemd unit file +func updateUnit(p provision.SSHCommander, name string, content string, dst string) error { + glog.Infof("Updating %s ...", dst) + + if _, err := p.SSHCommand(fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | sudo tee %s.new", path.Dir(dst), content, dst)); err != nil { + return err + } + if _, err := p.SSHCommand(fmt.Sprintf("sudo diff -u %s %s.new || { sudo mv %s.new %s; sudo systemctl -f daemon-reload && sudo sudo systemctl -f restart docker; }", dst, dst, dst, dst)); err != nil { + return err + } + return nil +} diff --git a/pkg/provision/ubuntu.go b/pkg/provision/ubuntu.go index 7cebe18dbb..9d2b272bd2 
100644 --- a/pkg/provision/ubuntu.go +++ b/pkg/provision/ubuntu.go @@ -19,18 +19,16 @@ package provision import ( "bytes" "fmt" - "path" "text/template" "time" "github.com/docker/machine/libmachine/auth" "github.com/docker/machine/libmachine/drivers" "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/log" "github.com/docker/machine/libmachine/provision" "github.com/docker/machine/libmachine/provision/pkgaction" - "github.com/docker/machine/libmachine/provision/serviceaction" "github.com/docker/machine/libmachine/swarm" + "github.com/golang/glog" "k8s.io/minikube/pkg/util/retry" ) @@ -43,7 +41,7 @@ type UbuntuProvisioner struct { func NewUbuntuProvisioner(d drivers.Driver) provision.Provisioner { return &UbuntuProvisioner{ BuildrootProvisioner{ - provision.NewSystemdProvisioner("ubuntu", d), + NewSystemdProvisioner("ubuntu", d), }, } } @@ -67,7 +65,7 @@ func (p *UbuntuProvisioner) GenerateDockerOptions(dockerPort int) (*provision.Do noPivot := true // Using pivot_root is not supported on fstype rootfs if fstype, err := rootFileSystemType(p); err == nil { - log.Debugf("root file system type: %s", fstype) + glog.Infof("root file system type: %s", fstype) noPivot = fstype == "rootfs" } @@ -83,7 +81,7 @@ Requires=docker.socket Type=notify ` if noPivot { - log.Warn("Using fundamentally insecure --no-pivot option") + glog.Warning("Using fundamentally insecure --no-pivot option") engineConfigTmpl += ` # DOCKER_RAMDISK disables pivot_root in Docker, using MS_MOVE instead. 
Environment=DOCKER_RAMDISK=yes @@ -144,30 +142,11 @@ WantedBy=multi-user.target return nil, err } - dockerCfg := &provision.DockerOptions{ + do := &provision.DockerOptions{ EngineOptions: engineCfg.String(), EngineOptionsPath: "/lib/systemd/system/docker.service", } - - log.Info("Setting Docker configuration on the remote daemon...") - - if _, err = p.SSHCommand(fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | sudo tee %s", path.Dir(dockerCfg.EngineOptionsPath), dockerCfg.EngineOptions, dockerCfg.EngineOptionsPath)); err != nil { - return nil, err - } - - // because in kic base image we pre-install docker it already has a service file. we need to daemon-reload for the new systemd file - if err := p.Service("", serviceaction.DaemonReload); err != nil { - return nil, err - } - - if err := p.Service("docker", serviceaction.Enable); err != nil { - return nil, err - } - - if err := p.Service("docker", serviceaction.Restart); err != nil { - return nil, err - } - return dockerCfg, nil + return do, updateUnit(p, "docker", do.EngineOptions, do.EngineOptionsPath) } // Package installs a package @@ -181,32 +160,33 @@ func (p *UbuntuProvisioner) Provision(swarmOptions swarm.Options, authOptions au p.AuthOptions = authOptions p.EngineOptions = engineOptions - log.Infof("provisioning hostname %q", p.Driver.GetMachineName()) + glog.Infof("provisioning hostname %q", p.Driver.GetMachineName()) if err := p.SetHostname(p.Driver.GetMachineName()); err != nil { return err } p.AuthOptions = setRemoteAuthOptions(p) - log.Debugf("set auth options %+v", p.AuthOptions) + glog.Infof("set auth options %+v", p.AuthOptions) - log.Debugf("setting up certificates") + glog.Infof("setting up certificates") configAuth := func() error { if err := configureAuth(p); err != nil { - log.Warnf("configureAuth failed: %v", err) + glog.Warningf("configureAuth failed: %v", err) return &retry.RetriableError{Err: err} } return nil } err := retry.Expo(configAuth, time.Second, 2*time.Minute) + if err != nil { 
- log.Debugf("Error configuring auth during provisioning %v", err) + glog.Infof("Error configuring auth during provisioning %v", err) return err } - log.Debugf("setting minikube options for container-runtime") + glog.Infof("setting minikube options for container-runtime") if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p); err != nil { - log.Debugf("Error setting container-runtime options during provisioning %v", err) + glog.Infof("Error setting container-runtime options during provisioning %v", err) return err } From 5cd7660d1e49089155bb3c2b59ae25430e7eb7b5 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 19 Mar 2020 17:44:21 -0700 Subject: [PATCH 117/668] fixHost: only reprovision if necessary, and only once --- cmd/minikube/cmd/node_add.go | 2 +- cmd/minikube/cmd/node_start.go | 2 +- cmd/minikube/cmd/start.go | 18 +++++++++++------- go.sum | 3 +++ pkg/minikube/machine/cluster_test.go | 12 ++++++------ pkg/minikube/machine/fix.go | 23 ++++++++++------------- pkg/minikube/machine/start.go | 4 ++-- pkg/minikube/node/config.go | 8 ++++++++ pkg/minikube/node/machine.go | 8 ++++---- pkg/minikube/node/node.go | 2 +- pkg/minikube/node/start.go | 4 ++-- 11 files changed, 49 insertions(+), 37 deletions(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 9ee9e39f1e..f450d20540 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -53,7 +53,7 @@ var nodeAddCmd = &cobra.Command{ exit.WithError("Error adding node to cluster", err) } - _, err = node.Start(*cc, *n, false, nil) + _, err = node.Start(*cc, config.ClusterConfig{}, *n, false, nil) if err != nil { exit.WithError("Error starting node", err) } diff --git a/cmd/minikube/cmd/node_start.go b/cmd/minikube/cmd/node_start.go index c0090b6287..ede4130c87 100644 --- a/cmd/minikube/cmd/node_start.go +++ b/cmd/minikube/cmd/node_start.go @@ -61,7 +61,7 @@ var nodeStartCmd = &cobra.Command{ } // Start it up baby - _, err = node.Start(*cc, *n, false, 
nil) + _, err = node.Start(*cc, *cc, *n, false, nil) if err != nil { out.FatalT("Failed to start node {{.name}}", out.V{"name": name}) } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 6227dfa16a..35019c76ec 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -313,7 +313,7 @@ func runStart(cmd *cobra.Command, args []string) { } k8sVersion := getKubernetesVersion(existing) - mc, n, err := generateCfgFromFlags(cmd, k8sVersion, driverName) + cc, n, err := generateCfgFromFlags(cmd, k8sVersion, driverName) if err != nil { exit.WithError("Failed to generate config", err) } @@ -329,7 +329,7 @@ func runStart(cmd *cobra.Command, args []string) { if err != nil { exit.WithError("Failed to cache ISO", err) } - mc.MinikubeISO = url + cc.MinikubeISO = url } if viper.GetBool(nativeSSH) { @@ -338,12 +338,12 @@ func runStart(cmd *cobra.Command, args []string) { ssh.SetDefaultClient(ssh.External) } - kubeconfig, err := startNode(existing, mc, n) + kubeconfig, err := startNode(existing, cc, n) if err != nil { exit.WithError("Starting node", err) } - if err := showKubectlInfo(kubeconfig, k8sVersion, mc.Name); err != nil { + if err := showKubectlInfo(kubeconfig, k8sVersion, cc.Name); err != nil { glog.Errorf("kubectl info: %v", err) } } @@ -383,7 +383,7 @@ func displayEnviron(env []string) { } } -func startNode(existing *config.ClusterConfig, mc config.ClusterConfig, n config.Node) (*kubeconfig.Settings, error) { +func startNode(existing *config.ClusterConfig, cc config.ClusterConfig, n config.Node) (*kubeconfig.Settings, error) { var existingAddons map[string]bool if viper.GetBool(installAddons) { existingAddons = map[string]bool{} @@ -391,7 +391,11 @@ func startNode(existing *config.ClusterConfig, mc config.ClusterConfig, n config existingAddons = existing.Addons } } - return node.Start(mc, n, true, existingAddons) + + if existing == nil { + existing = &config.ClusterConfig{} + } + return node.Start(cc, *existing, n, true, 
existingAddons) } func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName string) error { @@ -814,7 +818,7 @@ func validateRegistryMirror() { } } -// generateCfgFromFlags generates config.Config based on flags and supplied arguments +// generateCfgFromFlags generates config.ClusterConfig based on flags and supplied arguments func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) (config.ClusterConfig, config.Node, error) { r, err := cruntime.New(cruntime.Config{Type: viper.GetString(containerRuntime)}) if err != nil { diff --git a/go.sum b/go.sum index d84ef83e6e..8a4fe67c98 100644 --- a/go.sum +++ b/go.sum @@ -61,7 +61,9 @@ github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmx github.com/afbjorklund/go-getter v1.4.1-0.20190910175809-eb9f6c26742c h1:18gEt7qzn7CW7qMkfPTFyyotlPbvPQo9o4IDV8jZqP4= github.com/afbjorklund/go-getter v1.4.1-0.20190910175809-eb9f6c26742c/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= @@ -972,6 +974,7 @@ google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= 
gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index 0c32c8642a..fbb0066691 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -129,7 +129,7 @@ func TestStartHostExists(t *testing.T) { n := config.Node{Name: ih.Name} // This should pass without calling Create because the host exists already. - h, err := StartHost(api, mc, n) + h, err := StartHost(api, mc, mc, n) if err != nil { t.Fatalf("Error starting host: %v", err) } @@ -163,7 +163,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) { n := config.Node{Name: h.Name} // This should pass with creating host, while machine does not exist. - h, err = StartHost(api, mc, n) + h, err = StartHost(api, mc, mc, n) if err != nil { if err != ErrorMachineNotExist { t.Fatalf("Error starting host: %v", err) @@ -174,7 +174,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) { n.Name = h.Name // Second call. This should pass without calling Create because the host exists already. 
- h, err = StartHost(api, mc, n) + h, err = StartHost(api, mc, mc, n) if err != nil { t.Fatalf("Error starting host: %v", err) } @@ -207,7 +207,7 @@ func TestStartStoppedHost(t *testing.T) { mc := defaultClusterConfig mc.Name = h.Name n := config.Node{Name: h.Name} - h, err = StartHost(api, mc, n) + h, err = StartHost(api, mc, mc, n) if err != nil { t.Fatal("Error starting host.") } @@ -235,7 +235,7 @@ func TestStartHost(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - h, err := StartHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) + h, err := StartHost(api, defaultClusterConfig, defaultClusterConfig, config.Node{Name: "minikube"}) if err != nil { t.Fatal("Error starting host.") } @@ -269,7 +269,7 @@ func TestStartHostConfig(t *testing.T) { DockerOpt: []string{"param=value"}, } - h, err := StartHost(api, cfg, config.Node{Name: "minikube"}) + h, err := StartHost(api, cfg, cfg, config.Node{Name: "minikube"}) if err != nil { t.Fatal("Error starting host.") } diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 25f921cd5a..c8fcee801e 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -20,6 +20,7 @@ import ( "fmt" "math" "os" + "reflect" "strconv" "strings" "time" @@ -28,7 +29,6 @@ import ( "github.com/docker/machine/libmachine" "github.com/docker/machine/libmachine/host" - "github.com/docker/machine/libmachine/provision" "github.com/docker/machine/libmachine/state" "github.com/golang/glog" "github.com/pkg/errors" @@ -36,7 +36,7 @@ import ( "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/out" - "k8s.io/minikube/pkg/util/retry" + "k8s.io/minikube/pkg/provision" ) // hostRunner is a minimal host.Host based interface for running commands @@ -56,7 +56,7 @@ var ( ) // fixHost fixes up a previously configured VM so that it is ready to run Kubernetes -func fixHost(api libmachine.API, cc 
config.ClusterConfig, n config.Node) (*host.Host, error) { +func fixHost(api libmachine.API, cc config.ClusterConfig, existing config.ClusterConfig, n config.Node) (*host.Host, error) { out.T(out.Waiting, "Reconfiguring existing host ...") start := time.Now() @@ -78,15 +78,16 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host. return h, err } + old := engineOptions(existing) e := engineOptions(cc) - if len(e.Env) > 0 { + if !reflect.DeepEqual(old, e) { + glog.Infof("docker config changed, updating provisioner: %+v", e) h.HostOptions.EngineOptions.Env = e.Env - glog.Infof("Detecting provisioner ...") - provisioner, err := provision.DetectProvisioner(h.Driver) - if err != nil { - return h, errors.Wrap(err, "detecting provisioner") + p := provision.NewBuildrootProvisioner(h.Driver) + if driver.IsKIC(h.Driver.DriverName()) { + p = provision.NewUbuntuProvisioner(h.Driver) } - if err := provisioner.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions); err != nil { + if err := p.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions); err != nil { return h, errors.Wrap(err, "provision") } } @@ -104,10 +105,6 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host. return h, nil } - glog.Infof("Configuring auth for driver %s ...", h.Driver.DriverName()) - if err := h.ConfigureAuth(); err != nil { - return h, &retry.RetriableError{Err: errors.Wrap(err, "Error configuring auth on host")} - } return h, ensureSyncedGuestClock(h, cc.Driver) } diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go index 73c982fa80..a885b0476e 100644 --- a/pkg/minikube/machine/start.go +++ b/pkg/minikube/machine/start.go @@ -62,7 +62,7 @@ var ( ) // StartHost starts a host VM. 
-func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, error) { +func StartHost(api libmachine.API, cfg config.ClusterConfig, existing config.ClusterConfig, n config.Node) (*host.Host, error) { // Prevent machine-driver boot races, as well as our own certificate race releaser, err := acquireMachinesLock(cfg.Name) if err != nil { @@ -83,7 +83,7 @@ func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*ho return createHost(api, cfg, n) } glog.Infoln("Skipping create...Using existing machine configuration") - return fixHost(api, cfg, n) + return fixHost(api, cfg, existing, n) } func engineOptions(cfg config.ClusterConfig) *engine.Options { diff --git a/pkg/minikube/node/config.go b/pkg/minikube/node/config.go index b29867f1b6..df7eed881a 100644 --- a/pkg/minikube/node/config.go +++ b/pkg/minikube/node/config.go @@ -23,6 +23,7 @@ import ( "os/exec" "path/filepath" "strconv" + "time" "github.com/blang/semver" "github.com/docker/machine/libmachine" @@ -59,6 +60,12 @@ var ( // configureRuntimes does what needs to happen to get a runtime going. 
func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig, kv semver.Version) cruntime.Manager { + start := time.Now() + glog.Infof("configureRuntimes start") + defer func() { + glog.Infof("configureRuntimes took %s", time.Since(start)) + }() + co := cruntime.Config{ Type: viper.GetString(containerRuntime), Runner: runner, ImageRepository: k8s.ImageRepository, @@ -90,6 +97,7 @@ func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config } } + glog.Infof("enabling %s ...", cr.Name()) err = cr.Enable(disableOthers) if err != nil { exit.WithError("Failed to enable container runtime", err) diff --git a/pkg/minikube/node/machine.go b/pkg/minikube/node/machine.go index 483131515a..ffe4a792d7 100644 --- a/pkg/minikube/node/machine.go +++ b/pkg/minikube/node/machine.go @@ -39,12 +39,12 @@ import ( "k8s.io/minikube/pkg/util/retry" ) -func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { +func startMachine(cfg *config.ClusterConfig, existing *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { m, err := machine.NewAPIClient() if err != nil { exit.WithError("Failed to get machine client", err) } - host, preExists = startHost(m, *cfg, *node) + host, preExists = startHost(m, *cfg, *existing, *node) runner, err = machine.CommandRunner(host) if err != nil { exit.WithError("Failed to get command runner", err) @@ -68,13 +68,13 @@ func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command. 
} // startHost starts a new minikube host using a VM or None -func startHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, bool) { +func startHost(api libmachine.API, mc config.ClusterConfig, existing config.ClusterConfig, n config.Node) (*host.Host, bool) { exists, err := api.Exists(mc.Name) if err != nil { exit.WithError("Failed to check if machine exists", err) } - host, err := machine.StartHost(api, mc, n) + host, err := machine.StartHost(api, mc, existing, n) if err != nil { exit.WithError("Unable to start VM. Please investigate and run 'minikube delete' if possible", err) } diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index e92bad65b5..c25a9c95bd 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -65,7 +65,7 @@ func Add(cc *config.ClusterConfig, name string, controlPlane bool, worker bool, return nil, err } - _, err = Start(*cc, n, false, nil) + _, err = Start(*cc, *cc, n, false, nil) return &n, err } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index a3c5eee92b..8b75831f58 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -33,7 +33,7 @@ import ( ) // Start spins up a guest and starts the kubernetes node. 
-func Start(mc config.ClusterConfig, n config.Node, primary bool, existingAddons map[string]bool) (*kubeconfig.Settings, error) { +func Start(mc config.ClusterConfig, existing config.ClusterConfig, n config.Node, primary bool, existingAddons map[string]bool) (*kubeconfig.Settings, error) { k8sVersion := mc.KubernetesConfig.KubernetesVersion driverName := mc.Driver @@ -59,7 +59,7 @@ func Start(mc config.ClusterConfig, n config.Node, primary bool, existingAddons handleDownloadOnly(&cacheGroup, &kicGroup, k8sVersion) waitDownloadKicArtifacts(&kicGroup) - mRunner, preExists, machineAPI, host := startMachine(&mc, &n) + mRunner, preExists, machineAPI, host := startMachine(&mc, &existing, &n) defer machineAPI.Close() // wait for preloaded tarball to finish downloading before configuring runtimes From df608f70103878ba36b2309a5d1966c9b1ac2090 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Thu, 19 Mar 2020 23:45:29 -0700 Subject: [PATCH 118/668] exit earlier if the driver is invalid --- pkg/minikube/driver/driver.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index 2dce6350cd..1e73df9edf 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -19,12 +19,15 @@ package driver import ( "fmt" "os" + "runtime" "sort" "strings" "github.com/golang/glog" "k8s.io/minikube/pkg/drivers/kic" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/exit" + "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/registry" ) @@ -214,6 +217,9 @@ func Suggest(options []registry.DriverState) (registry.DriverState, []registry.D // Status returns the status of a driver func Status(name string) registry.DriverState { d := registry.Driver(name) + if d.Empty() { + exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS}) + } return registry.DriverState{ Name: d.Name, Priority: d.Priority, From 
65d21b6fbed991144c18f680f4d08df60d42497c Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Fri, 20 Mar 2020 07:03:38 -0700 Subject: [PATCH 119/668] Fix tests --- pkg/minikube/cruntime/cruntime_test.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/pkg/minikube/cruntime/cruntime_test.go b/pkg/minikube/cruntime/cruntime_test.go index af70e01c80..9523f1126a 100644 --- a/pkg/minikube/cruntime/cruntime_test.go +++ b/pkg/minikube/cruntime/cruntime_test.go @@ -408,6 +408,10 @@ func (f *FakeRunner) crictl(args []string, _ bool) (string, error) { func (f *FakeRunner) systemctl(args []string, root bool) (string, error) { // nolint result 0 (string) is always "" action := args[0] svcs := args[1:] + // force + if svcs[0] == "-f" { + svcs = svcs[1:] + } out := "" for i, arg := range args { @@ -503,9 +507,9 @@ func TestDisable(t *testing.T) { runtime string want []string }{ - {"docker", []string{"sudo", "systemctl", "stop", "docker", "docker.socket"}}, - {"crio", []string{"sudo", "systemctl", "stop", "crio"}}, - {"containerd", []string{"sudo", "systemctl", "stop", "containerd"}}, + {"docker", []string{"sudo", "systemctl", "stop", "-f", "docker", "docker.socket"}}, + {"crio", []string{"sudo", "systemctl", "stop", "-f", "crio"}}, + {"containerd", []string{"sudo", "systemctl", "stop", "-f", "containerd"}}, } for _, tc := range tests { t.Run(tc.runtime, func(t *testing.T) { From ff9c4fdcc6a9c0bb4965e4ea95176427750d075e Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Fri, 20 Mar 2020 07:15:34 -0700 Subject: [PATCH 120/668] Lint fixes --- pkg/minikube/machine/client.go | 1 + pkg/provision/provision.go | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/minikube/machine/client.go b/pkg/minikube/machine/client.go index 0121c92358..160b783a2c 100644 --- a/pkg/minikube/machine/client.go +++ b/pkg/minikube/machine/client.go @@ -172,6 +172,7 @@ func (api *LocalClient) Create(h *host.Host) error { return fmt.Errorf("driver %q 
does not exist", h.DriverName) } if def.Init == nil { + // NOTE: This will call provision.DetectProvisioner return api.legacyClient.Create(h) } diff --git a/pkg/provision/provision.go b/pkg/provision/provision.go index ceb4a18464..7b2e9e6539 100644 --- a/pkg/provision/provision.go +++ b/pkg/provision/provision.go @@ -69,7 +69,7 @@ func init() { // NewSystemdProvisioner is our fork of the same name in the upstream provision library, without the packages func NewSystemdProvisioner(osReleaseID string, d drivers.Driver) provision.SystemdProvisioner { return provision.SystemdProvisioner{ - provision.GenericProvisioner{ + GenericProvisioner: provision.GenericProvisioner{ SSHCommander: provision.GenericSSHCommander{Driver: d}, DockerOptionsDir: "/etc/docker", DaemonOptionsFile: "/etc/systemd/system/docker.service.d/10-machine.conf", @@ -292,12 +292,12 @@ func concatStrings(src []string, prefix string, postfix string) []string { // updateUnit efficiently updates a systemd unit file func updateUnit(p provision.SSHCommander, name string, content string, dst string) error { - glog.Infof("Updating %s ...", dst) + glog.Infof("Updating %s unit: %s ...", name, dst) if _, err := p.SSHCommand(fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | sudo tee %s.new", path.Dir(dst), content, dst)); err != nil { return err } - if _, err := p.SSHCommand(fmt.Sprintf("sudo diff -u %s %s.new || { sudo mv %s.new %s; sudo systemctl -f daemon-reload && sudo sudo systemctl -f restart docker; }", dst, dst, dst, dst)); err != nil { + if _, err := p.SSHCommand(fmt.Sprintf("sudo diff -u %s %s.new || { sudo mv %s.new %s; sudo systemctl -f daemon-reload && sudo sudo systemctl -f restart %s; }", dst, dst, dst, dst, name)); err != nil { return err } return nil From e1a766e2c82e254644843c477502db03b6cfe846 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Fri, 20 Mar 2020 08:10:01 -0700 Subject: [PATCH 121/668] Improve code structuring --- pkg/minikube/machine/client.go | 19 ++++++++++--------- 
pkg/minikube/machine/cluster_test.go | 9 --------- pkg/minikube/machine/fix.go | 8 ++------ pkg/minikube/machine/machine.go | 25 +++++++++++++++++++++++++ pkg/provision/provision.go | 13 +++++++++++++ 5 files changed, 50 insertions(+), 24 deletions(-) diff --git a/pkg/minikube/machine/client.go b/pkg/minikube/machine/client.go index 0121c92358..e9052ef448 100644 --- a/pkg/minikube/machine/client.go +++ b/pkg/minikube/machine/client.go @@ -36,11 +36,11 @@ import ( "github.com/docker/machine/libmachine/host" "github.com/docker/machine/libmachine/mcnutils" "github.com/docker/machine/libmachine/persist" - lib_provision "github.com/docker/machine/libmachine/provision" "github.com/docker/machine/libmachine/ssh" "github.com/docker/machine/libmachine/state" "github.com/docker/machine/libmachine/swarm" "github.com/docker/machine/libmachine/version" + "github.com/golang/glog" "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/driver" @@ -49,7 +49,6 @@ import ( "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/registry" "k8s.io/minikube/pkg/minikube/sshutil" - "k8s.io/minikube/pkg/provision" ) // NewRPCClient gets a new client. @@ -167,6 +166,12 @@ func CommandRunner(h *host.Host) (command.Runner, error) { // Create creates the host func (api *LocalClient) Create(h *host.Host) error { + glog.Infof("LocalClient.Create starting") + start := time.Now() + defer func() { + glog.Infof("LocalClient.Create took %s", time.Since(start)) + }() + def := registry.Driver(h.DriverName) if def.Empty() { return fmt.Errorf("driver %q does not exist", h.DriverName) @@ -209,21 +214,17 @@ func (api *LocalClient) Create(h *host.Host) error { { "provisioning", func() error { + // Skippable because we don't reconfigure Docker? 
if driver.BareMetal(h.Driver.DriverName()) { return nil } - var pv lib_provision.Provisioner - if driver.IsKIC(h.Driver.DriverName()) { - pv = provision.NewUbuntuProvisioner(h.Driver) - } else { - pv = provision.NewBuildrootProvisioner(h.Driver) - } - return pv.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions) + return provisionDockerMachine(h) }, }, } for _, step := range steps { + if err := step.f(); err != nil { return errors.Wrap(err, step.name) } diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index fbb0066691..a660b3448e 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -139,9 +139,6 @@ func TestStartHostExists(t *testing.T) { if s, _ := h.Driver.GetState(); s != state.Running { t.Fatalf("Machine not started.") } - if !md.Provisioner.Provisioned { - t.Fatalf("Expected provision to be called") - } } func TestStartHostErrMachineNotExist(t *testing.T) { @@ -185,9 +182,6 @@ func TestStartHostErrMachineNotExist(t *testing.T) { if s, _ := h.Driver.GetState(); s != state.Running { t.Fatalf("Machine not started.") } - if !md.Provisioner.Provisioned { - t.Fatalf("Expected provision to be called") - } } func TestStartStoppedHost(t *testing.T) { @@ -223,9 +217,6 @@ func TestStartStoppedHost(t *testing.T) { t.Fatalf("Machine must be saved after starting.") } - if !md.Provisioner.Provisioned { - t.Fatalf("Expected provision to be called") - } } func TestStartHost(t *testing.T) { diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index c8fcee801e..83e57c4174 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -36,7 +36,6 @@ import ( "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/out" - "k8s.io/minikube/pkg/provision" ) // hostRunner is a minimal host.Host based interface for running commands @@ -83,11 +82,8 @@ func fixHost(api 
libmachine.API, cc config.ClusterConfig, existing config.Cluste if !reflect.DeepEqual(old, e) { glog.Infof("docker config changed, updating provisioner: %+v", e) h.HostOptions.EngineOptions.Env = e.Env - p := provision.NewBuildrootProvisioner(h.Driver) - if driver.IsKIC(h.Driver.DriverName()) { - p = provision.NewUbuntuProvisioner(h.Driver) - } - if err := p.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions); err != nil { + err := provisionDockerMachine(h) + if err != nil { return h, errors.Wrap(err, "provision") } } diff --git a/pkg/minikube/machine/machine.go b/pkg/minikube/machine/machine.go index 215f240753..a9cba1e413 100644 --- a/pkg/minikube/machine/machine.go +++ b/pkg/minikube/machine/machine.go @@ -18,7 +18,10 @@ package machine import ( "github.com/docker/machine/libmachine/host" + libprovision "github.com/docker/machine/libmachine/provision" "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube/driver" + "k8s.io/minikube/pkg/provision" ) // Machine contains information about a machine @@ -74,3 +77,25 @@ func LoadMachine(name string) (*Machine, error) { } return &mm, nil } + +// provisionDockerMachine provides fast provisioning of a docker machine +func provisionDockerMachine(h *host.Host) error { + p, err := fastDetectProvisioner(h) + if err != nil { + return errors.Wrap(err, "fast detect") + } + return p.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions) +} + +// fastDetectProvisioner provides a shortcut for provisioner detection +func fastDetectProvisioner(h *host.Host) (libprovision.Provisioner, error) { + d := h.Driver.DriverName() + switch { + case driver.IsKIC(d): + return provision.NewUbuntuProvisioner(h.Driver), nil + case driver.BareMetal(d): + return libprovision.DetectProvisioner(h.Driver) + default: + return provision.NewBuildrootProvisioner(h.Driver), nil + } +} diff --git a/pkg/provision/provision.go b/pkg/provision/provision.go index 
52fb131960..b5dd4651fb 100644 --- a/pkg/provision/provision.go +++ b/pkg/provision/provision.go @@ -66,6 +66,19 @@ func init() { } +// NewSystemdProvisioner is our fork of the same name in the upstream provision library, without the packages +func NewSystemdProvisioner(osReleaseID string, d drivers.Driver) provision.SystemdProvisioner { + return provision.SystemdProvisioner{ + GenericProvisioner: provision.GenericProvisioner{ + SSHCommander: provision.GenericSSHCommander{Driver: d}, + DockerOptionsDir: "/etc/docker", + DaemonOptionsFile: "/etc/systemd/system/docker.service.d/10-machine.conf", + OsReleaseID: osReleaseID, + Driver: d, + }, + } +} + func configureAuth(p miniProvisioner) error { log.Infof("configureAuth start") start := time.Now() From 51bce4d780e5c5483ac1d12db449bb9060398aad Mon Sep 17 00:00:00 2001 From: Kenta Iso Date: Sat, 21 Mar 2020 00:35:23 +0900 Subject: [PATCH 122/668] =?UTF-8?q?bumpup=20istio=20operator=20addon=201.4?= =?UTF-8?q?.0=20=E2=86=92=201.5.0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../istio-operator.yaml.tmpl | 47 +++++-------------- deploy/addons/istio/README.md | 3 +- .../istio/istio-default-profile.yaml.tmpl | 17 +++++-- 3 files changed, 26 insertions(+), 41 deletions(-) diff --git a/deploy/addons/istio-provisioner/istio-operator.yaml.tmpl b/deploy/addons/istio-provisioner/istio-operator.yaml.tmpl index 0ac264461f..06b0ba995d 100644 --- a/deploy/addons/istio-provisioner/istio-operator.yaml.tmpl +++ b/deploy/addons/istio-provisioner/istio-operator.yaml.tmpl @@ -11,49 +11,24 @@ metadata: apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: istiocontrolplanes.install.istio.io + name: istiooperators.install.istio.io labels: kubernetes.io/minikube-addons: istio - addonmanager.kubernetes.io/mode: EnsureExists + addonmanager.kubernetes.io/mode: EnsureExists spec: group: install.istio.io names: - kind: IstioControlPlane - listKind: 
IstioControlPlaneList - plural: istiocontrolplanes - singular: istiocontrolplane + kind: IstioOperator + listKind: IstioOperatorList + plural: istiooperators + singular: istiooperator shortNames: - - icp + - iop scope: Namespaced subresources: status: {} - validation: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. - More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. - More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - spec: - description: 'Specification of the desired state of the istio control plane resource. - More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - type: object - status: - description: 'Status describes each of istio control plane component status at the current time. - 0 means NONE, 1 means UPDATING, 2 means HEALTHY, 3 means ERROR, 4 means RECONCILING. - More info: https://github.com/istio/operator/blob/master/pkg/apis/istio/v1alpha2/v1alpha2.pb.html & - https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - type: object versions: - - name: v1alpha2 + - name: v1alpha1 served: true storage: true ... 
@@ -243,9 +218,9 @@ spec: serviceAccountName: istio-operator containers: - name: istio-operator - image: docker.io/istio/operator:1.4.0 + image: docker.io/istio/operator:1.5.0 command: - - istio-operator + - operator - server imagePullPolicy: Always resources: @@ -257,7 +232,7 @@ spec: memory: 128Mi env: - name: WATCH_NAMESPACE - value: "" + value: "istio-system" - name: LEADER_ELECTION_NAMESPACE valueFrom: fieldRef: diff --git a/deploy/addons/istio/README.md b/deploy/addons/istio/README.md index 0cc971bcf5..306948b876 100644 --- a/deploy/addons/istio/README.md +++ b/deploy/addons/istio/README.md @@ -3,9 +3,10 @@ ### Enable istio on minikube Make sure to start minikube with at least 8192 MB of memory and 4 CPUs. +See official [Platform Setup](https://istio.io/docs/setup/platform-setup/) documentation. ```shell script -minikube start --memory=8000mb --cpus=4 +minikube start --memory=8192mb --cpus=4 ``` To enable this addon, simply run: diff --git a/deploy/addons/istio/istio-default-profile.yaml.tmpl b/deploy/addons/istio/istio-default-profile.yaml.tmpl index 036c6f31dd..6f4ade1271 100644 --- a/deploy/addons/istio/istio-default-profile.yaml.tmpl +++ b/deploy/addons/istio/istio-default-profile.yaml.tmpl @@ -1,10 +1,19 @@ -apiVersion: install.istio.io/v1alpha2 -kind: IstioControlPlane +apiVersion: v1 +kind: Namespace metadata: - namespace: istio-operator + name: istio-system + labels: + kubernetes.io/minikube-addons: istio + addonmanager.kubernetes.io/mode: EnsureExists + +--- +apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +metadata: + namespace: istio-system name: example-istiocontrolplane labels: kubernetes.io/minikube-addons: istio - addonmanager.kubernetes.io/mode: Reconcile + addonmanager.kubernetes.io/mode: Reconcile spec: profile: default From 4033b68355271dc836d9384e038db983d6fdf777 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Fri, 20 Mar 2020 08:53:14 -0700 Subject: [PATCH 123/668] Wait 30 seconds before spamming console with error logs 
--- pkg/minikube/bootstrapper/bsutil/kverify/kverify.go | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go index e420307c65..0b4c8d887c 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go @@ -40,17 +40,18 @@ import ( "k8s.io/minikube/pkg/minikube/logs" ) +// minLogCheckTime how long to wait before spamming error logs to console +const minLogCheckTime = 30 * time.Second + // WaitForAPIServerProcess waits for api server to be healthy returns error if it doesn't func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr command.Runner, start time.Time, timeout time.Duration) error { glog.Infof("waiting for apiserver process to appear ...") - minLogTime := kconst.APICallRetryInterval * 10 - err := wait.PollImmediate(time.Millisecond*500, timeout, func() (bool, error) { if time.Since(start) > timeout { return false, fmt.Errorf("cluster wait timed out during process check") } - if time.Since(start) > minLogTime { + if time.Since(start) > minLogCheckTime { announceProblems(r, bs, cr) time.Sleep(kconst.APICallRetryInterval * 5) } @@ -81,13 +82,12 @@ func apiServerPID(cr command.Runner) (int, error) { func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr command.Runner, client *kubernetes.Clientset, start time.Time, timeout time.Duration) error { glog.Info("waiting for kube-system pods to appear ...") pStart := time.Now() - minLogTime := kconst.APICallRetryInterval * 10 podList := func() (bool, error) { if time.Since(start) > timeout { return false, fmt.Errorf("cluster wait timed out during pod check") } - if time.Since(start) > minLogTime { + if time.Since(start) > minLogCheckTime { announceProblems(r, bs, cr) time.Sleep(kconst.APICallRetryInterval * 5) } @@ -116,13 +116,12 @@ func WaitForHealthyAPIServer(r cruntime.Manager, 
bs bootstrapper.Bootstrapper, c glog.Infof("waiting for apiserver healthz status ...") hStart := time.Now() - minLogTime := kconst.APICallRetryInterval * 10 healthz := func() (bool, error) { if time.Since(start) > timeout { return false, fmt.Errorf("cluster wait timed out during healthz check") } - if time.Since(start) > minLogTime { + if time.Since(start) > minLogCheckTime { announceProblems(r, bs, cr) time.Sleep(kconst.APICallRetryInterval * 5) } From 9cf91284579486fb7f9f9f27409d323f208f6d13 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Fri, 20 Mar 2020 11:05:14 -0700 Subject: [PATCH 124/668] Remove CrashLoopBackOff from root causes --- pkg/minikube/logs/logs.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/minikube/logs/logs.go b/pkg/minikube/logs/logs.go index e4ec4a9047..5b497ce858 100644 --- a/pkg/minikube/logs/logs.go +++ b/pkg/minikube/logs/logs.go @@ -51,7 +51,6 @@ var rootCauses = []string{ `unable to evict any pods`, `eviction manager: unexpected error`, `Resetting AnonymousAuth to false`, - `CrashLoopBackOff`, `Unable to register node.*forbidden`, `Failed to initialize CSINodeInfo.*forbidden`, `Failed to admit pod`, From 0ccb04a6ead8fc58c0552b95bed7ba67326598ed Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Fri, 20 Mar 2020 11:16:57 -0700 Subject: [PATCH 125/668] Skip kubeadm if cluster is running & properly configured --- pkg/minikube/bootstrapper/bsutil/binaries.go | 5 + pkg/minikube/bootstrapper/bsutil/files.go | 6 +- .../bootstrapper/bsutil/kverify/kverify.go | 70 +++++++- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 163 +++++++++++------- 4 files changed, 177 insertions(+), 67 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/binaries.go b/pkg/minikube/bootstrapper/bsutil/binaries.go index 32b9a166a2..0ffcaa05ad 100644 --- a/pkg/minikube/bootstrapper/bsutil/binaries.go +++ b/pkg/minikube/bootstrapper/bsutil/binaries.go @@ -50,6 +50,11 @@ func TransferBinaries(cfg config.KubernetesConfig, c command.Runner) error { 
return err } + // stop kubelet to avoid "Text File Busy" error + if _, err := c.RunCmd(exec.Command("/bin/bash", "-c", "pgrep kubelet && sudo systemctl stop kubelet")); err != nil { + glog.Warningf("unable to stop kubelet: %s", err) + } + var g errgroup.Group for _, name := range constants.KubernetesReleaseBinaries { name := name diff --git a/pkg/minikube/bootstrapper/bsutil/files.go b/pkg/minikube/bootstrapper/bsutil/files.go index c184eca3c9..cec6d69085 100644 --- a/pkg/minikube/bootstrapper/bsutil/files.go +++ b/pkg/minikube/bootstrapper/bsutil/files.go @@ -40,9 +40,9 @@ const ( // ConfigFileAssets returns configuration file assets func ConfigFileAssets(cfg config.KubernetesConfig, kubeadm []byte, kubelet []byte, kubeletSvc []byte, defaultCNIConfig []byte) []assets.CopyableFile { fs := []assets.CopyableFile{ - assets.NewMemoryAssetTarget(kubeadm, KubeadmYamlPath, "0640"), - assets.NewMemoryAssetTarget(kubelet, KubeletSystemdConfFile, "0644"), - assets.NewMemoryAssetTarget(kubeletSvc, KubeletServiceFile, "0644"), + assets.NewMemoryAssetTarget(kubeadm, KubeadmYamlPath+".new", "0640"), + assets.NewMemoryAssetTarget(kubelet, KubeletSystemdConfFile+".new", "0644"), + assets.NewMemoryAssetTarget(kubeletSvc, KubeletServiceFile+".new", "0644"), } // Copy the default CNI config (k8s.conf), so that kubelet can successfully // start a Pod in the case a user hasn't manually installed any CNI plugin diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go index e420307c65..75992ffd57 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go @@ -30,6 +30,7 @@ import ( "github.com/docker/machine/libmachine/state" "github.com/golang/glog" + core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" @@ -77,6 +78,68 @@ func apiServerPID(cr command.Runner) (int, error) { return 
strconv.Atoi(s) } +// ExpectedComponentsRunning returns whether or not all expected components are running +func ExpectedComponentsRunning(cs *kubernetes.Clientset) error { + expected := []string{ + "kube-dns", // coredns + "etcd", + "kube-apiserver", + "kube-controller-manager", + "kube-proxy", + "kube-scheduler", + } + + found := map[string]bool{} + + pods, err := cs.CoreV1().Pods("kube-system").List(meta.ListOptions{}) + if err != nil { + return err + } + + for _, pod := range pods.Items { + glog.Infof("found pod: %s", podStatusMsg(pod)) + if pod.Status.Phase != core.PodRunning { + continue + } + for k, v := range pod.ObjectMeta.Labels { + if k == "component" || k == "k8s-app" { + found[v] = true + } + } + } + + missing := []string{} + for _, e := range expected { + if !found[e] { + missing = append(missing, e) + } + } + if len(missing) > 0 { + return fmt.Errorf("missing components: %v", strings.Join(missing, ", ")) + } + return nil +} + +// podStatusMsg returns a human-readable pod status, for generating debug status +func podStatusMsg(pod core.Pod) string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("%q [%s] %s", pod.ObjectMeta.GetName(), pod.ObjectMeta.GetUID(), pod.Status.Phase)) + for i, c := range pod.Status.Conditions { + if c.Reason != "" { + if i == 0 { + sb.WriteString(": ") + } else { + sb.WriteString(" / ") + } + sb.WriteString(fmt.Sprintf("%s:%s", c.Type, c.Reason)) + } + if c.Message != "" { + sb.WriteString(fmt.Sprintf(" (%s)", c.Message)) + } + } + return sb.String() +} + // WaitForSystemPods verifies essential pods for running kurnetes is running func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr command.Runner, client *kubernetes.Clientset, start time.Time, timeout time.Duration) error { glog.Info("waiting for kube-system pods to appear ...") @@ -99,6 +162,10 @@ func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr comm return false, nil } glog.Infof("%d kube-system pods found", 
len(pods.Items)) + for _, pod := range pods.Items { + glog.Infof(podStatusMsg(pod)) + } + if len(pods.Items) < 2 { return false, nil } @@ -160,7 +227,7 @@ func APIServerStatus(cr command.Runner, ip net.IP, port int) (state.State, error pid, err := apiServerPID(cr) if err != nil { - glog.Warningf("unable to get apiserver pid: %v", err) + glog.Warningf("stopped: unable to get apiserver pid: %v", err) return state.Stopped, nil } @@ -206,6 +273,7 @@ func apiServerHealthz(ip net.IP, port int) (state.State, error) { resp, err := client.Get(url) // Connection refused, usually. if err != nil { + glog.Infof("stopped: %s: %v", url, err) return state.Stopped, nil } if resp.StatusCode == http.StatusUnauthorized { diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 39618cf8a2..a1deb57745 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -202,7 +202,8 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { } - c := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), bsutil.KubeadmYamlPath, extraFlags, strings.Join(ignore, ","))) + conf := bsutil.KubeadmYamlPath + c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo mv %s.new %s && %s init --config %s %s --ignore-preflight-errors=%s", conf, conf, bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), conf, extraFlags, strings.Join(ignore, ","))) rr, err := k.c.RunCmd(c) if err != nil { return errors.Wrapf(err, "init failed. 
output: %q", rr.Output()) @@ -229,6 +230,20 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { return nil } +func (k *Bootstrapper) controlPlaneEndpoint(cfg config.ClusterConfig) (string, int, error) { + cp, err := config.PrimaryControlPlane(&cfg) + if err != nil { + return "", 0, err + } + + if driver.IsKIC(cfg.Driver) { + ip := oci.DefaultBindIPV4 + port, err := oci.ForwardedPort(cfg.Driver, cfg.Name, cp.Port) + return ip, port, err + } + return cp.IP, cp.Port, nil +} + // client sets and returns a Kubernetes client to use to speak to a kubeadm launched apiserver func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error) { if k.k8sClient != nil { @@ -256,10 +271,7 @@ func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error func (k *Bootstrapper) WaitForCluster(cfg config.ClusterConfig, timeout time.Duration) error { start := time.Now() out.T(out.Waiting, "Waiting for cluster to come online ...") - cp, err := config.PrimaryControlPlane(&cfg) - if err != nil { - return err - } + cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c}) if err != nil { return err @@ -269,14 +281,9 @@ func (k *Bootstrapper) WaitForCluster(cfg config.ClusterConfig, timeout time.Dur return err } - ip := cp.IP - port := cp.Port - if driver.IsKIC(cfg.Driver) { - ip = oci.DefaultBindIPV4 - port, err = oci.ForwardedPort(cfg.Driver, cfg.Name, port) - if err != nil { - return errors.Wrapf(err, "get host-bind port %d for container %s", port, cfg.Name) - } + ip, port, err := k.controlPlaneEndpoint(cfg) + if err != nil { + return err } if err := kverify.WaitForHealthyAPIServer(cr, k, k.c, start, ip, port, timeout); err != nil { @@ -294,6 +301,31 @@ func (k *Bootstrapper) WaitForCluster(cfg config.ClusterConfig, timeout time.Dur return nil } +// needsReset returns whether or not the cluster needs to be reconfigured +func (k *Bootstrapper) needsReset(conf string, ip string, port int, 
client *kubernetes.Clientset) bool { + if _, err := k.c.RunCmd(exec.Command("sudo", "diff", "-u", conf, conf+".new")); err != nil { + glog.Infof("needs reset: configs differ") + return true + } + + st, err := kverify.APIServerStatus(k.c, net.ParseIP(ip), port) + if err != nil { + glog.Infof("needs reset: apiserver error: %v", err) + return true + } + + if st != state.Running { + glog.Infof("needs reset: apiserver in state %s", st) + return true + } + + if err := kverify.ExpectedComponentsRunning(client); err != nil { + glog.Infof("needs reset: %v", err) + return true + } + return false +} + // restartCluster restarts the Kubernetes cluster configured by kubeadm func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { glog.Infof("restartCluster start") @@ -319,14 +351,36 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { glog.Errorf("failed to create compat symlinks: %v", err) } - baseCmd := fmt.Sprintf("%s %s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), phase) - cmds := []string{ - fmt.Sprintf("%s phase certs all --config %s", baseCmd, bsutil.KubeadmYamlPath), - fmt.Sprintf("%s phase kubeconfig all --config %s", baseCmd, bsutil.KubeadmYamlPath), - fmt.Sprintf("%s phase %s all --config %s", baseCmd, controlPlane, bsutil.KubeadmYamlPath), - fmt.Sprintf("%s phase etcd local --config %s", baseCmd, bsutil.KubeadmYamlPath), + ip, port, err := k.controlPlaneEndpoint(cfg) + if err != nil { + return errors.Wrap(err, "control plane") } + client, err := k.client(ip, port) + if err != nil { + return errors.Wrap(err, "getting k8s client") + } + + // If the cluster is running, check if we have any work to do. 
+ conf := bsutil.KubeadmYamlPath + if !k.needsReset(conf, ip, port, client) { + glog.Infof("Taking a shortcut, as the cluster seems to be properly configured") + return nil + } + + if _, err := k.c.RunCmd(exec.Command("sudo", "mv", conf+".new", conf)); err != nil { + return errors.Wrap(err, "mv") + } + + baseCmd := fmt.Sprintf("%s %s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), phase) + cmds := []string{ + fmt.Sprintf("%s phase certs all --config %s", baseCmd, conf), + fmt.Sprintf("%s phase kubeconfig all --config %s", baseCmd, conf), + fmt.Sprintf("%s phase %s all --config %s", baseCmd, controlPlane, conf), + fmt.Sprintf("%s phase etcd local --config %s", baseCmd, conf), + } + + glog.Infof("resetting cluster from %s", conf) // Run commands one at a time so that it is easier to root cause failures. for _, c := range cmds { rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", c)) @@ -337,7 +391,7 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c}) if err != nil { - return err + return errors.Wrap(err, "runtime") } // We must ensure that the apiserver is healthy before proceeding @@ -345,34 +399,19 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { return errors.Wrap(err, "apiserver healthz") } - for _, n := range cfg.Nodes { - ip := n.IP - port := n.Port - if driver.IsKIC(cfg.Driver) { - ip = oci.DefaultBindIPV4 - port, err = oci.ForwardedPort(cfg.Driver, cfg.Name, port) - if err != nil { - return errors.Wrapf(err, "get host-bind port %d for container %s", port, cfg.Name) - } - } - client, err := k.client(ip, port) - if err != nil { - return errors.Wrap(err, "getting k8s client") - } - - if err := kverify.WaitForSystemPods(cr, k, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { - return errors.Wrap(err, "system pods") - } - - // Explicitly re-enable kubeadm addons (proxy, 
coredns) so that they will check for IP or configuration changes. - if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, bsutil.KubeadmYamlPath))); err != nil { - return errors.Wrapf(err, fmt.Sprintf("addon phase cmd:%q", rr.Command())) - } - - if err := bsutil.AdjustResourceLimits(k.c); err != nil { - glog.Warningf("unable to adjust resource limits: %v", err) - } + if err := kverify.WaitForSystemPods(cr, k, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { + return errors.Wrap(err, "system pods") } + + if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, bsutil.KubeadmYamlPath))); err != nil { + return errors.Wrapf(err, fmt.Sprintf("addon phase cmd:%q", rr.Command())) + } + + if err := bsutil.AdjustResourceLimits(k.c); err != nil { + glog.Warningf("unable to adjust resource limits: %v", err) + } + + glog.Infof("hope that was not too painful") return nil } @@ -437,11 +476,6 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { glog.Infof("kubelet %s config:\n%+v", kubeletCfg, cfg.KubernetesConfig) - // stop kubelet to avoid "Text File Busy" error - if err := stopKubelet(k.c); err != nil { - glog.Warningf("unable to stop kubelet: %s", err) - } - if err := bsutil.TransferBinaries(cfg.KubernetesConfig, k.c); err != nil { return errors.Wrap(err, "downloading binaries") } @@ -450,25 +484,19 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { if cfg.KubernetesConfig.EnableDefaultCNI { cniFile = []byte(defaultCNIConfig) } + + // Install assets into temporary files files := bsutil.ConfigFileAssets(cfg.KubernetesConfig, kubeadmCfg, kubeletCfg, kubeletService, cniFile) if err := copyFiles(k.c, files); err != nil { return err } - if err := startKubelet(k.c); err != nil { + if err := reloadKubelet(k.c); err != nil { return err } return nil } -func stopKubelet(runner command.Runner) error { - 
stopCmd := exec.Command("/bin/bash", "-c", "pgrep kubelet && sudo systemctl stop kubelet") - if rr, err := runner.RunCmd(stopCmd); err != nil { - return errors.Wrapf(err, "command: %q output: %q", rr.Command(), rr.Output()) - } - return nil -} - func copyFiles(runner command.Runner, files []assets.CopyableFile) error { // Combine mkdir request into a single call to reduce load dirs := []string{} @@ -488,8 +516,17 @@ func copyFiles(runner command.Runner, files []assets.CopyableFile) error { return nil } -func startKubelet(runner command.Runner) error { - startCmd := exec.Command("/bin/bash", "-c", "sudo systemctl daemon-reload && sudo systemctl start kubelet") +func reloadKubelet(runner command.Runner) error { + svc := bsutil.KubeletServiceFile + conf := bsutil.KubeletSystemdConfFile + + checkCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("pgrep kubelet && diff -u %s %s.new && diff -u %s %s.new", svc, svc, conf, conf)) + if _, err := runner.RunCmd(checkCmd); err == nil { + glog.Infof("kubelet is already running with the right configs") + return nil + } + + startCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo mv %s.new %s && sudo mv %s.new %s && sudo systemctl daemon-reload && sudo systemctl restart kubelet", svc, svc, conf, conf)) if _, err := runner.RunCmd(startCmd); err != nil { return errors.Wrap(err, "starting kubelet") } From bb387039d17594830387be1e3acc34cd7e804f0c Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 20 Mar 2020 11:29:36 -0700 Subject: [PATCH 126/668] move panic out of library --- cmd/minikube/cmd/start.go | 10 ++++++++-- pkg/minikube/driver/driver.go | 6 ------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index c9018c09d6..2508cc9511 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -465,13 +465,19 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState { out.T(out.Warning, warning, out.V{"driver": d, "vmd": vmd}) 
} ds := driver.Status(d) + if ds.Name == "" { + exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS}) + } out.T(out.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()}) return ds } // Fallback to old driver parameter - if viper.GetString("vm-driver") != "" { + if d := viper.GetString("vm-driver"); d != "" { ds := driver.Status(viper.GetString("vm-driver")) + if ds.Name == "" { + exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS}) + } out.T(out.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()}) return ds } @@ -551,7 +557,7 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) { name := ds.Name glog.Infof("validating driver %q against %+v", name, existing) if !driver.Supported(name) { - exit.WithCodeT(exit.Unavailable, "The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS}) + exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS}) } st := ds.State diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index 1e73df9edf..2dce6350cd 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -19,15 +19,12 @@ package driver import ( "fmt" "os" - "runtime" "sort" "strings" "github.com/golang/glog" "k8s.io/minikube/pkg/drivers/kic" "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/registry" ) @@ -217,9 +214,6 @@ func Suggest(options []registry.DriverState) (registry.DriverState, []registry.D // Status returns the status of a driver func Status(name string) registry.DriverState { d := registry.Driver(name) - if d.Empty() { - 
exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS}) - } return registry.DriverState{ Name: d.Name, Priority: d.Priority, From 9d0226e3586956335aa72113bdf5430556d11193 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Fri, 20 Mar 2020 11:31:21 -0700 Subject: [PATCH 127/668] remove silly message --- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index a1deb57745..45c6e65010 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -411,7 +411,6 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { glog.Warningf("unable to adjust resource limits: %v", err) } - glog.Infof("hope that was not too painful") return nil } From ec57b5e606309f8bf12810d460a29ccb3c91e363 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Fri, 20 Mar 2020 11:30:21 -0700 Subject: [PATCH 128/668] If container isn't found, interpret as "nothing to do" and return normally --- cmd/minikube/cmd/delete.go | 4 +--- pkg/minikube/machine/delete.go | 5 +++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/cmd/minikube/cmd/delete.go b/cmd/minikube/cmd/delete.go index 89f1eed4e6..253f48128a 100644 --- a/cmd/minikube/cmd/delete.go +++ b/cmd/minikube/cmd/delete.go @@ -239,9 +239,7 @@ func deleteProfile(profile *config.Profile) error { out.T(out.FailureType, "Failed to kill mount process: {{.error}}", out.V{"error": err}) } - if driver.IsVM(cc.Driver) { - deleteHosts(api, cc) - } + deleteHosts(api, cc) // In case DeleteHost didn't complete the job. 
deleteProfileDirectory(profile.Name) diff --git a/pkg/minikube/machine/delete.go b/pkg/minikube/machine/delete.go index 518f7b3fc6..888f8f158e 100644 --- a/pkg/minikube/machine/delete.go +++ b/pkg/minikube/machine/delete.go @@ -66,8 +66,9 @@ func DeleteHost(api libmachine.API, machineName string) error { // Get the status of the host. Ensure that it exists before proceeding ahead. status, err := Status(api, machineName) if err != nil { - // Warn, but proceed - out.WarningT(`Unable to get host status for "{{.name}}": {{.error}}`, out.V{"name": machineName, "error": err}) + // Assume that the host has already been deleted, log and return + glog.Infof("Unable to get host status for %s, assuming it has already been deleted: %v", machineName, err) + return nil } if status == state.None.String() { From 2822ee596e7b6ec63cdcf15cbf21d7a998f1c29e Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Fri, 20 Mar 2020 11:35:19 -0700 Subject: [PATCH 129/668] update urls --- site/content/en/docs/Contributing/triage.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/site/content/en/docs/Contributing/triage.md b/site/content/en/docs/Contributing/triage.md index 5fb1e24ab7..318312107b 100644 --- a/site/content/en/docs/Contributing/triage.md +++ b/site/content/en/docs/Contributing/triage.md @@ -25,7 +25,7 @@ Daily triage has two goals: 1. Responsiveness for new issues 1. Responsiveness when explicitly requested information was provided -The list of outstanding items are at https://teaparty-tts3vkcpgq-uc.a.run.app/s/daily-triage - it covers: +The list of outstanding items are at http://tinyurl.com/mk-tparty/daily-triage - it covers: 1. Issues without a `kind/` or `triage/` label 1. Issues without a `priority/` label @@ -119,7 +119,7 @@ Weekly triage has three goals: 1. Reviewing and closing PR’s 1. Closing stale issues -The list of outstanding items can be found at https://teaparty-tts3vkcpgq-uc.a.run.app/s/weekly-triage. 
+The list of outstanding items can be found at http://tinyurl.com/mk-tparty/weekly-triage. ## Post-Release Triage From c027fc0e6247fd3cc93eb7702d3703b141071632 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Fri, 20 Mar 2020 13:35:13 -0700 Subject: [PATCH 130/668] Make certificates per-profile and consistent until IP or names change --- pkg/minikube/bootstrapper/certs.go | 221 +++++++++++-------- pkg/minikube/bootstrapper/certs_test.go | 17 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 3 +- pkg/minikube/localpath/localpath.go | 5 + pkg/minikube/node/config.go | 5 +- pkg/provision/buildroot.go | 2 +- 6 files changed, 148 insertions(+), 105 deletions(-) diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index 652392cb65..675ba79ed2 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -17,6 +17,7 @@ limitations under the License. package bootstrapper import ( + "crypto/sha1" "encoding/pem" "fmt" "io/ioutil" @@ -25,9 +26,11 @@ import ( "os/exec" "path" "path/filepath" + "sort" "strings" "github.com/golang/glog" + "github.com/otiai10/copy" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/clientcmd/api" @@ -40,63 +43,50 @@ import ( "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/vmpath" "k8s.io/minikube/pkg/util" - "k8s.io/minikube/pkg/util/lock" - - "github.com/juju/mutex" -) - -var ( - certs = []string{ - "ca.crt", "ca.key", "apiserver.crt", "apiserver.key", "proxy-client-ca.crt", - "proxy-client-ca.key", "proxy-client.crt", "proxy-client.key", - } ) // SetupCerts gets the generated credentials required to talk to the APIServer. 
-func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) error { - - localPath := localpath.MiniPath() +func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) ([]assets.CopyableFile, error) { + localPath := localpath.Profile(k8s.ClusterName) glog.Infof("Setting up %s for IP: %s\n", localPath, n.IP) - // WARNING: This function was not designed for multiple profiles, so it is VERY racey: - // - // It updates a shared certificate file and uploads it to the apiserver before launch. - // - // If another process updates the shared certificate, it's invalid. - // TODO: Instead of racey manipulation of a shared certificate, use per-profile certs - spec := lock.PathMutexSpec(filepath.Join(localPath, "certs")) - glog.Infof("acquiring lock: %+v", spec) - releaser, err := mutex.Acquire(spec) + ccs, err := generateSharedCACerts() if err != nil { - return errors.Wrapf(err, "unable to acquire lock for %+v", spec) + return nil, errors.Wrap(err, "shared CA certs") } - defer releaser.Release() - if err := generateCerts(k8s, n); err != nil { - return errors.Wrap(err, "Error generating certs") + xfer, err := generateProfileCerts(k8s, n, ccs) + if err != nil { + return nil, errors.Wrap(err, "profile certs") } + + xfer = append(xfer, ccs.caCert) + xfer = append(xfer, ccs.caKey) + xfer = append(xfer, ccs.proxyCert) + xfer = append(xfer, ccs.proxyKey) + copyableFiles := []assets.CopyableFile{} - for _, cert := range certs { - p := filepath.Join(localPath, cert) + for _, p := range xfer { + cert := filepath.Base(p) perms := "0644" if strings.HasSuffix(cert, ".key") { perms = "0600" } certFile, err := assets.NewFileAsset(p, vmpath.GuestKubernetesCertsDir, cert, perms) if err != nil { - return err + return nil, errors.Wrapf(err, "key asset %s", cert) } copyableFiles = append(copyableFiles, certFile) } caCerts, err := collectCACerts() if err != nil { - return err + return nil, err } for src, dst := range caCerts { certFile, err := 
assets.NewFileAsset(src, path.Dir(dst), path.Base(dst), "0644") if err != nil { - return err + return nil, errors.Wrapf(err, "ca asset %s", src) } copyableFiles = append(copyableFiles, certFile) @@ -114,11 +104,11 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) kubeCfg := api.NewConfig() err = kubeconfig.PopulateFromSettings(kcs, kubeCfg) if err != nil { - return errors.Wrap(err, "populating kubeconfig") + return nil, errors.Wrap(err, "populating kubeconfig") } data, err := runtime.Encode(latest.Codec, kubeCfg) if err != nil { - return errors.Wrap(err, "encoding kubeconfig") + return nil, errors.Wrap(err, "encoding kubeconfig") } kubeCfgFile := assets.NewMemoryAsset(data, vmpath.GuestPersistentDir, "kubeconfig", "0644") @@ -126,46 +116,74 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) for _, f := range copyableFiles { if err := cmd.Copy(f); err != nil { - return errors.Wrapf(err, "Copy %s", f.GetAssetName()) + return nil, errors.Wrapf(err, "Copy %s", f.GetAssetName()) } } if err := installCertSymlinks(cmd, caCerts); err != nil { - return errors.Wrapf(err, "certificate symlinks") + return nil, errors.Wrapf(err, "certificate symlinks") } - return nil + return copyableFiles, nil } -func generateCerts(k8s config.KubernetesConfig, n config.Node) error { - serviceIP, err := util.GetServiceClusterIP(k8s.ServiceCIDR) - if err != nil { - return errors.Wrap(err, "getting service cluster ip") +type CACerts struct { + caCert string + caKey string + proxyCert string + proxyKey string +} + +// generateSharedCACerts generates CA certs shared among profiles, but only if missing +func generateSharedCACerts() (CACerts, error) { + globalPath := localpath.MiniPath() + cc := CACerts{ + caCert: filepath.Join(globalPath, "ca.crt"), + caKey: filepath.Join(globalPath, "ca.key"), + proxyCert: filepath.Join(globalPath, "proxy-client-ca.crt"), + proxyKey: filepath.Join(globalPath, "proxy-client-ca.key"), } - localPath := 
localpath.MiniPath() - caCertPath := filepath.Join(localPath, "ca.crt") - caKeyPath := filepath.Join(localPath, "ca.key") - - proxyClientCACertPath := filepath.Join(localPath, "proxy-client-ca.crt") - proxyClientCAKeyPath := filepath.Join(localPath, "proxy-client-ca.key") - caCertSpecs := []struct { certPath string keyPath string subject string }{ { // client / apiserver CA - certPath: caCertPath, - keyPath: caKeyPath, + certPath: cc.caCert, + keyPath: cc.caKey, subject: "minikubeCA", }, { // proxy-client CA - certPath: proxyClientCACertPath, - keyPath: proxyClientCAKeyPath, + certPath: cc.proxyCert, + keyPath: cc.proxyKey, subject: "proxyClientCA", }, } + for _, ca := range caCertSpecs { + if canRead(ca.certPath) && canRead(ca.keyPath) { + glog.Infof("skipping %s CA generation: %s", ca.subject, ca.keyPath) + continue + } + + glog.Infof("generating %s CA: %s", ca.subject, ca.keyPath) + if err := util.GenerateCACert(ca.certPath, ca.keyPath, ca.subject); err != nil { + return cc, errors.Wrap(err, "generate ca cert") + } + } + + return cc, nil +} + +// generateProfileCerts generates profile certs for a profile +func generateProfileCerts(k8s config.KubernetesConfig, n config.Node, ccs CACerts) ([]string, error) { + profilePath := localpath.Profile(k8s.ClusterName) + + serviceIP, err := util.GetServiceClusterIP(k8s.ServiceCIDR) + if err != nil { + return nil, errors.Wrap(err, "getting service cluster ip") + } + apiServerIPs := append( k8s.APIServerIPs, []net.IP{net.ParseIP(n.IP), serviceIP, net.ParseIP(oci.DefaultBindIPV4), net.ParseIP("10.0.0.1")}...) @@ -174,9 +192,19 @@ func generateCerts(k8s config.KubernetesConfig, n config.Node) error { apiServerNames, util.GetAlternateDNS(k8s.DNSDomain)...) - signedCertSpecs := []struct { - certPath string - keyPath string + // Generate a hash input for certs that depend on ip/name combinations + hi := []string{} + hi = append(hi, apiServerAlternateNames...) 
+ for _, ip := range apiServerIPs { + hi = append(hi, ip.String()) + } + sort.Strings(hi) + + specs := []struct { + certPath string + keyPath string + hash string + subject string ips []net.IP alternateNames []string @@ -184,56 +212,77 @@ func generateCerts(k8s config.KubernetesConfig, n config.Node) error { caKeyPath string }{ { // Client cert - certPath: filepath.Join(localPath, "client.crt"), - keyPath: filepath.Join(localPath, "client.key"), + certPath: filepath.Join(profilePath, "client.crt"), + keyPath: filepath.Join(profilePath, "client.key"), subject: "minikube-user", ips: []net.IP{}, alternateNames: []string{}, - caCertPath: caCertPath, - caKeyPath: caKeyPath, + caCertPath: ccs.caCert, + caKeyPath: ccs.caKey, }, { // apiserver serving cert - certPath: filepath.Join(localPath, "apiserver.crt"), - keyPath: filepath.Join(localPath, "apiserver.key"), + hash: fmt.Sprintf("%x", sha1.Sum([]byte(strings.Join(hi, "/"))))[0:8], + certPath: filepath.Join(profilePath, "apiserver.crt"), + keyPath: filepath.Join(profilePath, "apiserver.key"), subject: "minikube", ips: apiServerIPs, alternateNames: apiServerAlternateNames, - caCertPath: caCertPath, - caKeyPath: caKeyPath, + caCertPath: ccs.caCert, + caKeyPath: ccs.caKey, }, { // aggregator proxy-client cert - certPath: filepath.Join(localPath, "proxy-client.crt"), - keyPath: filepath.Join(localPath, "proxy-client.key"), + certPath: filepath.Join(profilePath, "proxy-client.crt"), + keyPath: filepath.Join(profilePath, "proxy-client.key"), subject: "aggregator", ips: []net.IP{}, alternateNames: []string{}, - caCertPath: proxyClientCACertPath, - caKeyPath: proxyClientCAKeyPath, + caCertPath: ccs.proxyCert, + caKeyPath: ccs.proxyKey, }, } - for _, caCertSpec := range caCertSpecs { - if !(canReadFile(caCertSpec.certPath) && - canReadFile(caCertSpec.keyPath)) { - if err := util.GenerateCACert( - caCertSpec.certPath, caCertSpec.keyPath, caCertSpec.subject, - ); err != nil { - return errors.Wrap(err, "Error generating CA 
certificate") + xfer := []string{} + for _, spec := range specs { + if spec.subject != "minikube-user" { + xfer = append(xfer, spec.certPath) + xfer = append(xfer, spec.keyPath) + } + + cp := spec.certPath + kp := spec.keyPath + if spec.hash != "" { + cp = cp + "." + spec.hash + kp = kp + "." + spec.hash + } + + if canRead(cp) && canRead(kp) { + glog.Infof("skipping %s signed cert generation: %s", spec.subject, kp) + continue + } + + glog.Infof("generating %s signed cert: %s", spec.subject, kp) + err := util.GenerateSignedCert( + cp, kp, spec.subject, + spec.ips, spec.alternateNames, + spec.caCertPath, spec.caKeyPath, + ) + if err != nil { + return xfer, errors.Wrapf(err, "generate signed cert for %q", spec.subject) + } + + if spec.hash != "" { + glog.Infof("copying %s -> %s", cp, spec.certPath) + if err := copy.Copy(cp, spec.certPath); err != nil { + return xfer, errors.Wrap(err, "copy cert") + } + glog.Infof("copying %s -> %s", kp, spec.keyPath) + if err := copy.Copy(kp, spec.keyPath); err != nil { + return xfer, errors.Wrap(err, "copy key") } } } - for _, signedCertSpec := range signedCertSpecs { - if err := util.GenerateSignedCert( - signedCertSpec.certPath, signedCertSpec.keyPath, signedCertSpec.subject, - signedCertSpec.ips, signedCertSpec.alternateNames, - signedCertSpec.caCertPath, signedCertSpec.caKeyPath, - ); err != nil { - return errors.Wrap(err, "Error generating signed apiserver serving cert") - } - } - - return nil + return xfer, nil } // isValidPEMCertificate checks whether the input file is a valid PEM certificate (with at least one CERTIFICATE block) @@ -355,9 +404,9 @@ func installCertSymlinks(cr command.Runner, caCerts map[string]string) error { return nil } -// canReadFile returns true if the file represented +// canRead returns true if the file represented // by path exists and is readable, otherwise false. 
-func canReadFile(path string) bool { +func canRead(path string) bool { f, err := os.Open(path) if err != nil { return false diff --git a/pkg/minikube/bootstrapper/certs_test.go b/pkg/minikube/bootstrapper/certs_test.go index d92e174660..fd96b6a838 100644 --- a/pkg/minikube/bootstrapper/certs_test.go +++ b/pkg/minikube/bootstrapper/certs_test.go @@ -24,7 +24,6 @@ import ( "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" - "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/tests" "k8s.io/minikube/pkg/util" ) @@ -58,20 +57,8 @@ func TestSetupCerts(t *testing.T) { f := command.NewFakeCommandRunner() f.SetCommandToOutput(expected) - var filesToBeTransferred []string - for _, cert := range certs { - filesToBeTransferred = append(filesToBeTransferred, filepath.Join(localpath.MiniPath(), cert)) - } - filesToBeTransferred = append(filesToBeTransferred, filepath.Join(localpath.MiniPath(), "ca.crt")) - filesToBeTransferred = append(filesToBeTransferred, filepath.Join(localpath.MiniPath(), "certs", "mycert.pem")) - - if err := SetupCerts(f, k8s, config.Node{}); err != nil { + _, err := SetupCerts(f, k8s, config.Node{}) + if err != nil { t.Fatalf("Error starting cluster: %v", err) } - for _, cert := range filesToBeTransferred { - _, err := f.GetFileToContents(cert) - if err != nil { - t.Errorf("Cert not generated: %s", cert) - } - } } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 39618cf8a2..df5dc17a0e 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -397,7 +397,8 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error { // SetupCerts sets up certificates within the cluster. 
func (k *Bootstrapper) SetupCerts(k8s config.KubernetesConfig, n config.Node) error { - return bootstrapper.SetupCerts(k.c, k8s, n) + _, err := bootstrapper.SetupCerts(k.c, k8s, n) + return err } // UpdateCluster updates the cluster diff --git a/pkg/minikube/localpath/localpath.go b/pkg/minikube/localpath/localpath.go index d9faef5d9a..3dd6d4158c 100644 --- a/pkg/minikube/localpath/localpath.go +++ b/pkg/minikube/localpath/localpath.go @@ -54,6 +54,11 @@ func MakeMiniPath(fileName ...string) string { return filepath.Join(args...) } +// Profile returns the path to a profile +func Profile(name string) string { + return filepath.Join(MiniPath(), "profiles", name) +} + // MachinePath returns the Minikube machine path of a machine func MachinePath(machine string, miniHome ...string) string { miniPath := MiniPath() diff --git a/pkg/minikube/node/config.go b/pkg/minikube/node/config.go index b29867f1b6..1ee307dcc6 100644 --- a/pkg/minikube/node/config.go +++ b/pkg/minikube/node/config.go @@ -122,6 +122,7 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, node config.Nod if err := bs.UpdateCluster(cfg); err != nil { exit.WithError("Failed to update cluster", err) } + if err := bs.SetupCerts(cfg.KubernetesConfig, node); err != nil { exit.WithError("Failed to setup certs", err) } @@ -137,8 +138,8 @@ func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clu kcs := &kubeconfig.Settings{ ClusterName: clusterName, ClusterServerAddress: addr, - ClientCertificate: localpath.MakeMiniPath("client.crt"), - ClientKey: localpath.MakeMiniPath("client.key"), + ClientCertificate: filepath.Join(localpath.Profile(cc.Name), "client.crt"), + ClientKey: filepath.Join(localpath.Profile(cc.Name), "client.key"), CertificateAuthority: localpath.MakeMiniPath("ca.crt"), KeepContext: viper.GetBool(keepContext), EmbedCerts: viper.GetBool(embedCerts), diff --git a/pkg/provision/buildroot.go b/pkg/provision/buildroot.go index 193478f215..15d9d39d74 100644 --- 
a/pkg/provision/buildroot.go +++ b/pkg/provision/buildroot.go @@ -160,7 +160,7 @@ WantedBy=multi-user.target return nil, err } - if err := p.Service("docker", serviceaction.Restart); err != nil { + if err := p.Service("docker", serviceaction.Start); err != nil { return nil, err } return dockerCfg, nil From 0711bd07bded295c09435baf3bc32b4fef1d2879 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Fri, 20 Mar 2020 13:38:42 -0700 Subject: [PATCH 131/668] Revert test change --- pkg/provision/buildroot.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/provision/buildroot.go b/pkg/provision/buildroot.go index 15d9d39d74..193478f215 100644 --- a/pkg/provision/buildroot.go +++ b/pkg/provision/buildroot.go @@ -160,7 +160,7 @@ WantedBy=multi-user.target return nil, err } - if err := p.Service("docker", serviceaction.Start); err != nil { + if err := p.Service("docker", serviceaction.Restart); err != nil { return nil, err } return dockerCfg, nil From 35aec77fe4a595c169a67f78aff76849029c55ec Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 20 Mar 2020 14:07:43 -0700 Subject: [PATCH 132/668] code comments --- cmd/minikube/cmd/node_add.go | 4 +--- cmd/minikube/cmd/ssh.go | 3 +-- cmd/minikube/cmd/start.go | 4 ++-- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 5 ----- pkg/minikube/node/node.go | 6 ++++++ 5 files changed, 10 insertions(+), 12 deletions(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 1e28103142..d593639b4d 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -17,8 +17,6 @@ limitations under the License. 
package cmd import ( - "fmt" - "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/spf13/viper" @@ -48,7 +46,7 @@ var nodeAddCmd = &cobra.Command{ out.ErrT(out.FailureType, "none driver does not support multi-node clusters") } - name := fmt.Sprintf("m%02d", len(cc.Nodes)+1) + name := node.Name(len(cc.Nodes) + 1) out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile}) diff --git a/cmd/minikube/cmd/ssh.go b/cmd/minikube/cmd/ssh.go index 917733ac81..4a8508ec5d 100644 --- a/cmd/minikube/cmd/ssh.go +++ b/cmd/minikube/cmd/ssh.go @@ -60,8 +60,7 @@ var sshCmd = &cobra.Command{ } else { n, _, err = node.Retrieve(cc, nodeName) if err != nil { - out.FailureT("Node {{.nodeName}} does not exist.", out.V{"nodeName": nodeName}) - exit.WithError("", err) + exit.WithCodeT(exit.Unavailable, "Node {{.nodeName}} does not exist.", out.V{"nodeName": nodeName}) } } host, err := machine.LoadHost(api, driver.MachineName(*cc, *n)) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 3a6771976a..75ff638511 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -356,10 +356,10 @@ func runStart(cmd *cobra.Command, args []string) { } if numNodes > 1 { if driver.BareMetal(driverName) { - out.T(out.Meh, "The none driver is not compatible with multi-node clusters.") + exit.WithCodeT(exit.Config, "The none driver is not compatible with multi-node clusters.") } else { for i := 1; i < numNodes; i++ { - nodeName := fmt.Sprintf("m%02d", i+1) + nodeName := node.Name(i + 1) n := config.Node{ Name: nodeName, Worker: true, diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 37516d0f09..d00f166a54 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -419,13 +419,8 @@ func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) { return "", errors.Wrap(err, "generating 
bootstrap token") } - /*cp, err := config.PrimaryControlPlane(&cc) - if err != nil { - return "", errors.Wrap(err, "getting primary control plane") - }*/ joinCmd := r.Stdout.String() joinCmd = strings.Replace(joinCmd, "kubeadm", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), 1) - //joinCmd = strings.ReplaceAll(joinCmd, "localhost", cp.IP) joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(joinCmd)) return joinCmd, nil diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 55b2fdf298..97e9d2f204 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -18,6 +18,7 @@ package node import ( "errors" + "fmt" "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/config" @@ -91,3 +92,8 @@ func Save(cfg *config.ClusterConfig, node *config.Node) error { } return config.SaveProfile(viper.GetString(config.ProfileName), cfg) } + +// Name returns the appropriate name for the node given the current number of nodes +func Name(index int) string { + return fmt.Sprintf("m%02d", index) +} From a09aa6253588b1a6c75033aef55a43214090923f Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 20 Mar 2020 14:34:49 -0700 Subject: [PATCH 133/668] delete admin.conf before running kubedm init --- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index d00f166a54..7a3ade16c2 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -165,6 +165,13 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { glog.Infof("StartCluster complete in %s", time.Since(start)) }() + // Remove admin.conf from any previous run + c := exec.Command("/bin/bash", "-c", "sudo rm -f /etc/kubernetes/admin.conf") + _, err = k.c.RunCmd(c) + if err != nil { + return errors.Wrap(err, "deleting admin.conf") + } + 
version, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion) if err != nil { return errors.Wrap(err, "parsing kubernetes version") @@ -202,7 +209,7 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { } - c := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), bsutil.KubeadmYamlPath, extraFlags, strings.Join(ignore, ","))) + c = exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), bsutil.KubeadmYamlPath, extraFlags, strings.Join(ignore, ","))) rr, err := k.c.RunCmd(c) if err != nil { return errors.Wrapf(err, "init failed. output: %q", rr.Output()) From 4faa31ebcf7651220e6fe0527c517707e2a836f3 Mon Sep 17 00:00:00 2001 From: Prasad Katti Date: Fri, 20 Mar 2020 14:56:04 -0700 Subject: [PATCH 134/668] Add logic to get desc node output in bootstrapper LogCommands func --- cmd/minikube/cmd/logs.go | 6 ++--- pkg/minikube/bootstrapper/bootstrapper.go | 2 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 12 ++++++--- pkg/minikube/logs/logs.go | 28 ++++++-------------- pkg/minikube/node/start.go | 2 +- 5 files changed, 22 insertions(+), 28 deletions(-) diff --git a/cmd/minikube/cmd/logs.go b/cmd/minikube/cmd/logs.go index cf36b52cb2..66ed68d675 100644 --- a/cmd/minikube/cmd/logs.go +++ b/cmd/minikube/cmd/logs.go @@ -95,18 +95,18 @@ var logsCmd = &cobra.Command{ exit.WithError("Unable to get runtime", err) } if followLogs { - err := logs.Follow(cr, bs, runner) + err := logs.Follow(cr, bs, *cfg, runner) if err != nil { exit.WithError("Follow", err) } return } if showProblems { - problems := logs.FindProblems(cr, bs, runner) + problems := logs.FindProblems(cr, bs, *cfg, runner) logs.OutputProblems(problems, numberOfProblems) return } - err = logs.Output(cr, bs, runner, numberOfLines) + err = logs.Output(cr, bs, *cfg, 
runner, numberOfLines) if err != nil { exit.WithError("Error getting machine logs", err) } diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index 712446285d..6c1e48442a 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -40,7 +40,7 @@ type Bootstrapper interface { DeleteCluster(config.KubernetesConfig) error WaitForCluster(config.ClusterConfig, time.Duration) error // LogCommands returns a map of log type to a command which will display that log. - LogCommands(LogOptions) map[string]string + LogCommands(config.ClusterConfig, LogOptions) map[string]string SetupCerts(config.KubernetesConfig, config.Node) error GetKubeletStatus() (string, error) GetAPIServerStatus(net.IP, int) (string, error) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 92f896e4ef..f2f4d47c73 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -110,7 +110,7 @@ func (k *Bootstrapper) GetAPIServerStatus(ip net.IP, port int) (string, error) { } // LogCommands returns a map of log type to a command which will display that log. 
-func (k *Bootstrapper) LogCommands(o bootstrapper.LogOptions) map[string]string { +func (k *Bootstrapper) LogCommands(cfg config.ClusterConfig, o bootstrapper.LogOptions) map[string]string { var kubelet strings.Builder kubelet.WriteString("sudo journalctl -u kubelet") if o.Lines > 0 { @@ -128,9 +128,15 @@ func (k *Bootstrapper) LogCommands(o bootstrapper.LogOptions) map[string]string if o.Lines > 0 { dmesg.WriteString(fmt.Sprintf(" | tail -n %d", o.Lines)) } + + describe_nodes := fmt.Sprintf("sudo %s describe node -A --kubeconfig=%s", + path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl"), + path.Join(vmpath.GuestPersistentDir, "kubeconfig")) + return map[string]string{ - "kubelet": kubelet.String(), - "dmesg": dmesg.String(), + "kubelet": kubelet.String(), + "dmesg": dmesg.String(), + "describe nodes": describe_nodes, } } diff --git a/pkg/minikube/logs/logs.go b/pkg/minikube/logs/logs.go index a44a9b6073..f284f0e98c 100644 --- a/pkg/minikube/logs/logs.go +++ b/pkg/minikube/logs/logs.go @@ -23,20 +23,17 @@ import ( "fmt" "os" "os/exec" - "path" "regexp" "sort" "strings" "github.com/golang/glog" "github.com/pkg/errors" - "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/bootstrapper" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/out" - "k8s.io/minikube/pkg/minikube/vmpath" ) // rootCauseRe is a regular expression that matches known failure root causes @@ -66,9 +63,9 @@ type logRunner interface { const lookBackwardsCount = 400 // Follow follows logs from multiple files in tail(1) format -func Follow(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr logRunner) error { +func Follow(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr logRunner) error { cs := []string{} - for _, v := range logCommands(r, bs, 0, true) { + for _, v := range logCommands(r, bs, cfg, 0, true) { cs = append(cs, 
v+" &") } cs = append(cs, "wait") @@ -88,9 +85,9 @@ func IsProblem(line string) bool { } // FindProblems finds possible root causes among the logs -func FindProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr logRunner) map[string][]string { +func FindProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr logRunner) map[string][]string { pMap := map[string][]string{} - cmds := logCommands(r, bs, lookBackwardsCount, false) + cmds := logCommands(r, bs, cfg, lookBackwardsCount, false) for name := range cmds { glog.Infof("Gathering logs for %s ...", name) var b bytes.Buffer @@ -132,8 +129,8 @@ func OutputProblems(problems map[string][]string, maxLines int) { } // Output displays logs from multiple sources in tail(1) format -func Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, runner command.Runner, lines int) error { - cmds := logCommands(r, bs, lines, false) +func Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, runner command.Runner, lines int) error { + cmds := logCommands(r, bs, cfg, lines, false) cmds["kernel"] = "uptime && uname -a && grep PRETTY /etc/os-release" names := []string{} @@ -170,8 +167,8 @@ func Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, runner command.Run } // logCommands returns a list of commands that would be run to receive the anticipated logs -func logCommands(r cruntime.Manager, bs bootstrapper.Bootstrapper, length int, follow bool) map[string]string { - cmds := bs.LogCommands(bootstrapper.LogOptions{Lines: length, Follow: follow}) +func logCommands(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, length int, follow bool) map[string]string { + cmds := bs.LogCommands(cfg, bootstrapper.LogOptions{Lines: length, Follow: follow}) for _, pod := range importantPods { ids, err := r.ListContainers(cruntime.ListOptions{Name: pod}) if err != nil { @@ -191,14 +188,5 @@ func logCommands(r cruntime.Manager, bs 
bootstrapper.Bootstrapper, length int, f cmds[r.Name()] = r.SystemLogCmd(length) cmds["container status"] = cruntime.ContainerStatusCommand() - cfg, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil { - out.ErrLn("Error loading profile config: %v", err) - } - - cmds["describe nodes"] = fmt.Sprintf("sudo %s describe node -A --kubeconfig=%s", - path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl"), - path.Join(vmpath.GuestPersistentDir, "kubeconfig")) - return cmds } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index a3c5eee92b..0916134f6d 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -88,7 +88,7 @@ func Start(mc config.ClusterConfig, n config.Node, primary bool, existingAddons // pull images or restart cluster out.T(out.Launch, "Launching Kubernetes ... ") if err := bs.StartCluster(mc); err != nil { - exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner)) + exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mc, mRunner)) } configureMounts() From 05814cce2833477531397d57a0a672f9c8e7f8e5 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 20 Mar 2020 15:10:45 -0700 Subject: [PATCH 135/668] only apply kic networking overlay to control plane --- pkg/minikube/bootstrapper/bootstrapper.go | 1 - pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 17 ++++------------- pkg/minikube/node/start.go | 3 --- 3 files changed, 4 insertions(+), 17 deletions(-) diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index 1dac315e80..5627e1b3c3 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -42,7 +42,6 @@ type Bootstrapper interface { WaitForNode(config.ClusterConfig, config.Node, time.Duration) error JoinCluster(config.ClusterConfig, config.Node, string) error UpdateNode(config.ClusterConfig, config.Node, 
cruntime.Manager) error - SetupNode(config.ClusterConfig) error GenerateToken(config.ClusterConfig) (string, error) // LogCommands returns a map of log type to a command which will display that log. LogCommands(LogOptions) map[string]string diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 7a3ade16c2..1c0268d779 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -215,8 +215,10 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { return errors.Wrapf(err, "init failed. output: %q", rr.Output()) } - if err = k.SetupNode(cfg); err != nil { - return errors.Wrap(err, "setting up node") + if cfg.Driver == driver.Docker { + if err := k.applyKicOverlay(cfg); err != nil { + return errors.Wrap(err, "apply kic overlay") + } } if err := k.applyNodeLabels(cfg); err != nil { @@ -234,17 +236,6 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { return nil } -// SetupNode runs commands that need to be on all nodes -func (k *Bootstrapper) SetupNode(cfg config.ClusterConfig) error { - if cfg.Driver == driver.Docker { - if err := k.applyKicOverlay(cfg); err != nil { - return errors.Wrap(err, "apply kic overlay") - } - } - - return nil -} - // client sets and returns a Kubernetes client to use to speak to a kubeadm launched apiserver func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error) { if k.k8sClient != nil { diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 6f9b441366..b46469837e 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -124,9 +124,6 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo exit.WithError("setting up certs", err) } - if err = bs.SetupNode(cc); err != nil { - exit.WithError("Failed to setup node", err) - } } configureMounts() From c3f1f5e04bc1e7cd3ce45e63538e114f5f1af09f Mon Sep 17 00:00:00 
2001 From: Prasad Katti Date: Fri, 20 Mar 2020 18:10:06 -0700 Subject: [PATCH 136/668] some more plumbing work to pass cfg in kverify.go --- .../bootstrapper/bsutil/kverify/kverify.go | 17 +++++++++-------- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 14 +++++++------- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go index 0b4c8d887c..47af70f492 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go @@ -36,6 +36,7 @@ import ( kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/minikube/pkg/minikube/bootstrapper" "k8s.io/minikube/pkg/minikube/command" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/logs" ) @@ -44,7 +45,7 @@ import ( const minLogCheckTime = 30 * time.Second // WaitForAPIServerProcess waits for api server to be healthy returns error if it doesn't -func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr command.Runner, start time.Time, timeout time.Duration) error { +func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, start time.Time, timeout time.Duration) error { glog.Infof("waiting for apiserver process to appear ...") err := wait.PollImmediate(time.Millisecond*500, timeout, func() (bool, error) { if time.Since(start) > timeout { @@ -52,7 +53,7 @@ func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, c } if time.Since(start) > minLogCheckTime { - announceProblems(r, bs, cr) + announceProblems(r, bs, cfg, cr) time.Sleep(kconst.APICallRetryInterval * 5) } @@ -79,7 +80,7 @@ func apiServerPID(cr command.Runner) (int, error) { } // WaitForSystemPods verifies essential pods for running kurnetes is running -func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, 
cr command.Runner, client *kubernetes.Clientset, start time.Time, timeout time.Duration) error { +func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, client *kubernetes.Clientset, start time.Time, timeout time.Duration) error { glog.Info("waiting for kube-system pods to appear ...") pStart := time.Now() @@ -88,7 +89,7 @@ func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr comm return false, fmt.Errorf("cluster wait timed out during pod check") } if time.Since(start) > minLogCheckTime { - announceProblems(r, bs, cr) + announceProblems(r, bs, cfg, cr) time.Sleep(kconst.APICallRetryInterval * 5) } @@ -112,7 +113,7 @@ func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr comm } // WaitForHealthyAPIServer waits for api server status to be running -func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr command.Runner, start time.Time, ip string, port int, timeout time.Duration) error { +func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, start time.Time, ip string, port int, timeout time.Duration) error { glog.Infof("waiting for apiserver healthz status ...") hStart := time.Now() @@ -122,7 +123,7 @@ func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, c } if time.Since(start) > minLogCheckTime { - announceProblems(r, bs, cr) + announceProblems(r, bs, cfg, cr) time.Sleep(kconst.APICallRetryInterval * 5) } @@ -145,8 +146,8 @@ func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, c } // announceProblems checks for problems, and slows polling down if any are found -func announceProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr command.Runner) { - problems := logs.FindProblems(r, bs, cr) +func announceProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner) { + 
problems := logs.FindProblems(r, bs, cfg, cr) if len(problems) > 0 { logs.OutputProblems(problems, 5) time.Sleep(kconst.APICallRetryInterval * 15) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index e669621000..4c6a9f9f9e 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -129,14 +129,14 @@ func (k *Bootstrapper) LogCommands(cfg config.ClusterConfig, o bootstrapper.LogO dmesg.WriteString(fmt.Sprintf(" | tail -n %d", o.Lines)) } - describe_nodes := fmt.Sprintf("sudo %s describe node -A --kubeconfig=%s", + describeNodes := fmt.Sprintf("sudo %s describe node -A --kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl"), path.Join(vmpath.GuestPersistentDir, "kubeconfig")) return map[string]string{ "kubelet": kubelet.String(), "dmesg": dmesg.String(), - "describe nodes": describe_nodes, + "describe nodes": describeNodes, } } @@ -271,7 +271,7 @@ func (k *Bootstrapper) WaitForCluster(cfg config.ClusterConfig, timeout time.Dur return err } - if err := kverify.WaitForAPIServerProcess(cr, k, k.c, start, timeout); err != nil { + if err := kverify.WaitForAPIServerProcess(cr, k, cfg, k.c, start, timeout); err != nil { return err } @@ -285,7 +285,7 @@ func (k *Bootstrapper) WaitForCluster(cfg config.ClusterConfig, timeout time.Dur } } - if err := kverify.WaitForHealthyAPIServer(cr, k, k.c, start, ip, port, timeout); err != nil { + if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, start, ip, port, timeout); err != nil { return err } @@ -294,7 +294,7 @@ func (k *Bootstrapper) WaitForCluster(cfg config.ClusterConfig, timeout time.Dur return errors.Wrap(err, "get k8s client") } - if err := kverify.WaitForSystemPods(cr, k, k.c, c, start, timeout); err != nil { + if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, c, start, timeout); err != nil { return errors.Wrap(err, "waiting for system pods") } 
return nil @@ -347,7 +347,7 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { } // We must ensure that the apiserver is healthy before proceeding - if err := kverify.WaitForAPIServerProcess(cr, k, k.c, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { + if err := kverify.WaitForAPIServerProcess(cr, k, cfg, k.c, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { return errors.Wrap(err, "apiserver healthz") } @@ -366,7 +366,7 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { return errors.Wrap(err, "getting k8s client") } - if err := kverify.WaitForSystemPods(cr, k, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { + if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { return errors.Wrap(err, "system pods") } From 90a6eb3da0f27a87cbd7f4fc934f8d380f331d67 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Fri, 20 Mar 2020 19:12:30 -0700 Subject: [PATCH 137/668] Recover from ssh restart --- pkg/minikube/machine/start.go | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go index a885b0476e..fe94b92432 100644 --- a/pkg/minikube/machine/start.go +++ b/pkg/minikube/machine/start.go @@ -32,6 +32,7 @@ import ( "github.com/juju/mutex" "github.com/pkg/errors" "github.com/spf13/viper" + "golang.org/x/crypto/ssh" "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" @@ -43,6 +44,7 @@ import ( "k8s.io/minikube/pkg/minikube/sshutil" "k8s.io/minikube/pkg/minikube/vmpath" "k8s.io/minikube/pkg/util/lock" + "k8s.io/minikube/pkg/util/retry" ) var ( @@ -191,6 +193,7 @@ func postStartSetup(h *host.Host, mc config.ClusterConfig) error { } glog.Infof("creating required directories: %v", requiredDirectories) + r, err := commandRunner(h) if err != nil { return 
errors.Wrap(err, "command runner") @@ -229,11 +232,19 @@ func commandRunner(h *host.Host) (command.Runner, error) { } glog.Infof("Creating SSH client and returning SSHRunner for %q driver", d) - client, err := sshutil.NewSSHClient(h.Driver) - if err != nil { - return nil, errors.Wrap(err, "ssh client") + + // Retry in order to survive an ssh restart, which sometimes happens due to provisioning + var sc *ssh.Client + getSSH := func() (err error) { + sc, err = sshutil.NewSSHClient(h.Driver) + return err } - return command.NewSSHRunner(client), nil + + if err := retry.Expo(getSSH, 250*time.Millisecond, 2*time.Second); err != nil { + return nil, err + } + + return command.NewSSHRunner(sc), nil } // acquireMachinesLock protects against code that is not parallel-safe (libmachine, cert setup) From f90e59756d8d2cd5a64aa38545b838473b1b1551 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Fri, 20 Mar 2020 19:12:32 -0700 Subject: [PATCH 138/668] Ensure that provisionDockerMachine is always called: for old images, and for corrupted ones --- pkg/minikube/machine/fix.go | 15 ++++++--------- pkg/minikube/machine/machine.go | 9 +++++++++ 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 83e57c4174..8bf997b6f6 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -20,7 +20,6 @@ import ( "fmt" "math" "os" - "reflect" "strconv" "strings" "time" @@ -77,15 +76,13 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, existing config.Cluste return h, err } - old := engineOptions(existing) + // Technically, we should only have to call provision if Docker has changed, + // but who can predict what shape the existing VM is in. 
e := engineOptions(cc) - if !reflect.DeepEqual(old, e) { - glog.Infof("docker config changed, updating provisioner: %+v", e) - h.HostOptions.EngineOptions.Env = e.Env - err := provisionDockerMachine(h) - if err != nil { - return h, errors.Wrap(err, "provision") - } + h.HostOptions.EngineOptions.Env = e.Env + err = provisionDockerMachine(h) + if err != nil { + return h, errors.Wrap(err, "provision") } if driver.IsMock(h.DriverName) { diff --git a/pkg/minikube/machine/machine.go b/pkg/minikube/machine/machine.go index a9cba1e413..26470c3e7a 100644 --- a/pkg/minikube/machine/machine.go +++ b/pkg/minikube/machine/machine.go @@ -17,8 +17,11 @@ limitations under the License. package machine import ( + "time" + "github.com/docker/machine/libmachine/host" libprovision "github.com/docker/machine/libmachine/provision" + "github.com/golang/glog" "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/provision" @@ -80,6 +83,12 @@ func LoadMachine(name string) (*Machine, error) { // provisionDockerMachine provides fast provisioning of a docker machine func provisionDockerMachine(h *host.Host) error { + glog.Infof("provisioning docker machine ...") + start := time.Now() + defer func() { + glog.Infof("provisioned docker machine in %s", time.Since(start)) + }() + p, err := fastDetectProvisioner(h) if err != nil { return errors.Wrap(err, "fast detect") From 32d32dcc47e083113f41fc8b5001de6ba39427e7 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Fri, 20 Mar 2020 21:20:22 -0700 Subject: [PATCH 139/668] Also clear admin.conf on update --- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 21 ++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 1c0268d779..cfa110d46a 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -165,13 +165,6 @@ func (k *Bootstrapper) 
StartCluster(cfg config.ClusterConfig) error { glog.Infof("StartCluster complete in %s", time.Since(start)) }() - // Remove admin.conf from any previous run - c := exec.Command("/bin/bash", "-c", "sudo rm -f /etc/kubernetes/admin.conf") - _, err = k.c.RunCmd(c) - if err != nil { - return errors.Wrap(err, "deleting admin.conf") - } - version, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion) if err != nil { return errors.Wrap(err, "parsing kubernetes version") @@ -209,7 +202,13 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { } - c = exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), bsutil.KubeadmYamlPath, extraFlags, strings.Join(ignore, ","))) + // Remove the previous kubeadm kubeconfig as the IP may have changed + _, err = k.c.RunCmd(exec.Command("sudo", "rm", "-f", "/etc/kubernetes/admin.conf")) + if err != nil { + return errors.Wrap(err, "deleting admin.conf") + } + + c := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), bsutil.KubeadmYamlPath, extraFlags, strings.Join(ignore, ","))) rr, err := k.c.RunCmd(c) if err != nil { return errors.Wrapf(err, "init failed. 
output: %q", rr.Output()) @@ -328,6 +327,12 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { glog.Errorf("failed to create compat symlinks: %v", err) } + // Remove the previous kubeadm kubeconfig as the IP may have changed + _, err = k.c.RunCmd(exec.Command("sudo", "rm", "-f", "/etc/kubernetes/admin.conf")) + if err != nil { + return errors.Wrap(err, "deleting admin.conf") + } + baseCmd := fmt.Sprintf("%s %s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), phase) cmds := []string{ fmt.Sprintf("%s phase certs all --config %s", baseCmd, bsutil.KubeadmYamlPath), From beadb8626e0c3c1b118426318190187dc3cdb6f4 Mon Sep 17 00:00:00 2001 From: Tacio Costa Date: Fri, 20 Mar 2020 03:52:00 -0300 Subject: [PATCH 140/668] Fix script to update kubernetes version looking for wrong files and regex --- hack/kubernetes_version/update_kubernetes_version.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/hack/kubernetes_version/update_kubernetes_version.go b/hack/kubernetes_version/update_kubernetes_version.go index 87466c6249..6bd5325c21 100644 --- a/hack/kubernetes_version/update_kubernetes_version.go +++ b/hack/kubernetes_version/update_kubernetes_version.go @@ -52,18 +52,18 @@ func main() { } mode := info.Mode() - re := regexp.MustCompile(`var DefaultKubernetesVersion = .*`) - f := re.ReplaceAllString(string(cf), "var DefaultKubernetesVersion = \""+v+"\"") + re := regexp.MustCompile(`DefaultKubernetesVersion = \".*`) + f := re.ReplaceAllString(string(cf), "DefaultKubernetesVersion = \""+v+"\"") - re = regexp.MustCompile(`var NewestKubernetesVersion = .*`) - f = re.ReplaceAllString(f, "var NewestKubernetesVersion = \""+v+"\"") + re = regexp.MustCompile(`NewestKubernetesVersion = \".*`) + f = re.ReplaceAllString(f, "NewestKubernetesVersion = \""+v+"\"") if err := ioutil.WriteFile(constantsFile, []byte(f), mode); err != nil { fmt.Println(err) os.Exit(1) } - testData := 
"../../pkg/minikube/bootstrapper/kubeadm/testdata" + testData := "../../pkg/minikube/bootstrapper/bsutil/testdata" err = filepath.Walk(testData, func(path string, info os.FileInfo, err error) error { if err != nil { From eca69790a5d4c047bd545b5f168c098fb72af8ca Mon Sep 17 00:00:00 2001 From: Iso Kenta Date: Sat, 21 Mar 2020 18:27:57 +0900 Subject: [PATCH 141/668] =?UTF-8?q?bumpup=20helm-tiller=20v2.16.1=20?= =?UTF-8?q?=E2=86=92=20v2.16.3=20and=20add=20integration=20test=20for=20ti?= =?UTF-8?q?ller?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- deploy/addons/helm-tiller/helm-tiller-dp.tmpl | 2 +- test/integration/addons_test.go | 45 ++++++++++++++++++- 2 files changed, 45 insertions(+), 2 deletions(-) diff --git a/deploy/addons/helm-tiller/helm-tiller-dp.tmpl b/deploy/addons/helm-tiller/helm-tiller-dp.tmpl index 49ae46166e..deccc348a3 100644 --- a/deploy/addons/helm-tiller/helm-tiller-dp.tmpl +++ b/deploy/addons/helm-tiller/helm-tiller-dp.tmpl @@ -46,7 +46,7 @@ spec: value: kube-system - name: TILLER_HISTORY_MAX value: "0" - image: gcr.io/kubernetes-helm/tiller:v2.16.1 + image: gcr.io/kubernetes-helm/tiller:v2.16.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 3 diff --git a/test/integration/addons_test.go b/test/integration/addons_test.go index 2120ca4060..313fa01d15 100644 --- a/test/integration/addons_test.go +++ b/test/integration/addons_test.go @@ -40,7 +40,7 @@ func TestAddons(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), Minutes(40)) defer CleanupWithLogs(t, profile, cancel) - args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "-v=1", "--addons=ingress", "--addons=registry", "--addons=metrics-server"}, StartArgs()...) 
+ args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "-v=1", "--addons=ingress", "--addons=registry", "--addons=metrics-server", "--addons=helm-tiller"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Fatalf("%s failed: %v", rr.Args, err) @@ -55,6 +55,7 @@ func TestAddons(t *testing.T) { {"Registry", validateRegistryAddon}, {"Ingress", validateIngressAddon}, {"MetricsServer", validateMetricsServerAddon}, + {"HelmTiller", validateHelmTillerAddon}, } for _, tc := range tests { tc := tc @@ -249,3 +250,45 @@ func validateMetricsServerAddon(ctx context.Context, t *testing.T, profile strin t.Errorf("%s failed: %v", rr.Args, err) } } + +func validateHelmTillerAddon(ctx context.Context, t *testing.T, profile string) { + client, err := kapi.Client(profile) + if err != nil { + t.Fatalf("kubernetes client: %v", client) + } + + start := time.Now() + if err := kapi.WaitForDeploymentToStabilize(client, "kube-system", "tiller-deploy", Minutes(6)); err != nil { + t.Errorf("waiting for tiller-deploy deployment to stabilize: %v", err) + } + t.Logf("tiller-deploy stabilized in %s", time.Since(start)) + + if _, err := PodWait(ctx, t, profile, "kube-system", "app=helm", Minutes(6)); err != nil { + t.Fatalf("wait: %v", err) + } + + want := "Server: &version.Version" + // Test from inside the cluster (`helm version` use pod.list permission. 
we use tiller serviceaccount in kube-system to list pod) + checkHelmTiller := func() error { + rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "run", "--rm", "helm-test", "--restart=Never", "--image=alpine/helm:2.16.3", "-it", "--namespace=kube-system", "--serviceaccount=tiller", "--", "version")) + if err != nil { + return err + } + if rr.Stderr.String() != "" { + t.Logf("%v: unexpected stderr: %s", rr.Args, rr.Stderr) + } + if !strings.Contains(rr.Stdout.String(), want) { + return fmt.Errorf("%v stdout = %q, want %q", rr.Args, rr.Stdout, want) + } + return nil + } + + if err := retry.Expo(checkHelmTiller, 500*time.Millisecond, Minutes(2)); err != nil { + t.Errorf(err.Error()) + } + + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "helm-tiller", "--alsologtostderr", "-v=1")) + if err != nil { + t.Errorf("%s failed: %v", rr.Args, err) + } +} From 39fea36ea5958ff1d97dedd8de2ba3c1deeb9263 Mon Sep 17 00:00:00 2001 From: Zhongcheng Lao Date: Sat, 21 Mar 2020 17:51:42 +0800 Subject: [PATCH 142/668] Update Nvidia GPU plugin --- ...ce-plugin.yaml.tmpl => nvidia-gpu-device-plugin.yaml} | 9 +++------ pkg/minikube/assets/addons.go | 4 ++-- 2 files changed, 5 insertions(+), 8 deletions(-) rename deploy/addons/gpu/{nvidia-gpu-device-plugin.yaml.tmpl => nvidia-gpu-device-plugin.yaml} (84%) diff --git a/deploy/addons/gpu/nvidia-gpu-device-plugin.yaml.tmpl b/deploy/addons/gpu/nvidia-gpu-device-plugin.yaml similarity index 84% rename from deploy/addons/gpu/nvidia-gpu-device-plugin.yaml.tmpl rename to deploy/addons/gpu/nvidia-gpu-device-plugin.yaml index 96252d1ba5..d4ee2ead9c 100644 --- a/deploy/addons/gpu/nvidia-gpu-device-plugin.yaml.tmpl +++ b/deploy/addons/gpu/nvidia-gpu-device-plugin.yaml @@ -46,21 +46,18 @@ spec: hostPath: path: /dev containers: - - image: "{{default "k8s.gcr.io" .ImageRepository}}/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" - command: 
["/usr/bin/nvidia-gpu-device-plugin", "-logtostderr"] + - image: "nvidia/k8s-device-plugin:1.0.0-beta4" + command: ["/usr/bin/nvidia-device-plugin", "-logtostderr"] name: nvidia-gpu-device-plugin resources: requests: cpu: 50m memory: 10Mi - limits: - cpu: 50m - memory: 10Mi securityContext: privileged: true volumeMounts: - name: device-plugin - mountPath: /device-plugin + mountPath: /var/lib/kubelet/device-plugins - name: dev mountPath: /dev updateStrategy: diff --git a/pkg/minikube/assets/addons.go b/pkg/minikube/assets/addons.go index 4e20974058..852eb27252 100644 --- a/pkg/minikube/assets/addons.go +++ b/pkg/minikube/assets/addons.go @@ -296,11 +296,11 @@ var Addons = map[string]*Addon{ }, false, "nvidia-driver-installer"), "nvidia-gpu-device-plugin": NewAddon([]*BinAsset{ MustBinAsset( - "deploy/addons/gpu/nvidia-gpu-device-plugin.yaml.tmpl", + "deploy/addons/gpu/nvidia-gpu-device-plugin.yaml", vmpath.GuestAddonsDir, "nvidia-gpu-device-plugin.yaml", "0640", - true), + false), }, false, "nvidia-gpu-device-plugin"), "logviewer": NewAddon([]*BinAsset{ MustBinAsset( From c42ecf95e0c198fdd8de7fa7c95e1d1f448da7d6 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Sat, 21 Mar 2020 07:53:03 -0700 Subject: [PATCH 143/668] Download dependencies for non-Docker runtimes --- test/integration/aaa_download_only_test.go | 227 +++++++++++---------- 1 file changed, 115 insertions(+), 112 deletions(-) diff --git a/test/integration/aaa_download_only_test.go b/test/integration/aaa_download_only_test.go index 3eda60e8cf..300c9323c1 100644 --- a/test/integration/aaa_download_only_test.go +++ b/test/integration/aaa_download_only_test.go @@ -40,138 +40,141 @@ import ( ) func TestDownloadOnly(t *testing.T) { - profile := UniqueProfileName("download") - ctx, cancel := context.WithTimeout(context.Background(), Minutes(15)) - defer Cleanup(t, profile, cancel) + for _, r := range []string{"crio", "docker", "containerd"} { + t.Run(r, func(t *testing.T) { + // Stores the startup run result 
for later error messages + var rrr *RunResult + var err error - // Stores the startup run result for later error messages - var rrr *RunResult - var err error + profile := UniqueProfileName(r) + ctx, cancel := context.WithTimeout(context.Background(), Minutes(30)) + defer Cleanup(t, profile, cancel) - t.Run("group", func(t *testing.T) { - versions := []string{ - constants.OldestKubernetesVersion, - constants.DefaultKubernetesVersion, - constants.NewestKubernetesVersion, - } - for _, v := range versions { - t.Run(v, func(t *testing.T) { - // Explicitly does not pass StartArgs() to test driver default - // --force to avoid uid check - args := append([]string{"start", "--download-only", "-p", profile, "--force", "--alsologtostderr", fmt.Sprintf("--kubernetes-version=%s", v)}, StartArgs()...) + versions := []string{ + constants.OldestKubernetesVersion, + constants.DefaultKubernetesVersion, + constants.NewestKubernetesVersion, + } - // Preserve the initial run-result for debugging - if rrr == nil { - rrr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) - } else { - _, err = Run(t, exec.CommandContext(ctx, Target(), args...)) - } + for _, v := range versions { + t.Run(v, func(t *testing.T) { + // Explicitly does not pass StartArgs() to test driver default + // --force to avoid uid check + args := append([]string{"start", "--download-only", "-p", profile, "--force", "--alsologtostderr", fmt.Sprintf("--kubernetes-version=%s", v), fmt.Sprintf("--container-runtime=%s", r)}, StartArgs()...) 
- if err != nil { - t.Errorf("%s failed: %v", args, err) - } - - if download.PreloadExists(v, "docker") { - // Just make sure the tarball path exists - if _, err := os.Stat(download.TarballPath(v)); err != nil { - t.Errorf("preloaded tarball path doesn't exist: %v", err) + // Preserve the initial run-result for debugging + if rrr == nil { + rrr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) + } else { + _, err = Run(t, exec.CommandContext(ctx, Target(), args...)) } - return - } - imgs, err := images.Kubeadm("", v) - if err != nil { - t.Errorf("kubeadm images: %v %+v", v, err) - } + if err != nil { + t.Errorf("%s failed: %v", args, err) + } - // skip verify for cache images if --driver=none - if !NoneDriver() { - for _, img := range imgs { - img = strings.Replace(img, ":", "_", 1) // for example kube-scheduler:v1.15.2 --> kube-scheduler_v1.15.2 - fp := filepath.Join(localpath.MiniPath(), "cache", "images", img) - _, err := os.Stat(fp) - if err != nil { - t.Errorf("expected image file exist at %q but got error: %v", fp, err) + if download.PreloadExists(v, "docker") { + // Just make sure the tarball path exists + if _, err := os.Stat(download.TarballPath(v)); err != nil { + t.Errorf("preloaded tarball path doesn't exist: %v", err) + } + return + } + + imgs, err := images.Kubeadm("", v) + if err != nil { + t.Errorf("kubeadm images: %v %+v", v, err) + } + + // skip verify for cache images if --driver=none + if !NoneDriver() { + for _, img := range imgs { + img = strings.Replace(img, ":", "_", 1) // for example kube-scheduler:v1.15.2 --> kube-scheduler_v1.15.2 + fp := filepath.Join(localpath.MiniPath(), "cache", "images", img) + _, err := os.Stat(fp) + if err != nil { + t.Errorf("expected image file exist at %q but got error: %v", fp, err) + } } } + + // checking binaries downloaded (kubelet,kubeadm) + for _, bin := range constants.KubernetesReleaseBinaries { + fp := filepath.Join(localpath.MiniPath(), "cache", "linux", v, bin) + _, err := os.Stat(fp) + if 
err != nil { + t.Errorf("expected the file for binary exist at %q but got error %v", fp, err) + } + } + + // If we are on darwin/windows, check to make sure OS specific kubectl has been downloaded + // as well for the `minikube kubectl` command + if runtime.GOOS == "linux" { + return + } + binary := "kubectl" + if runtime.GOOS == "windows" { + binary = "kubectl.exe" + } + fp := filepath.Join(localpath.MiniPath(), "cache", runtime.GOOS, v, binary) + if _, err := os.Stat(fp); err != nil { + t.Errorf("expected the file for binary exist at %q but got error %v", fp, err) + } + }) + } + + // Check that the profile we've created has the expected driver + t.Run("ExpectedDefaultDriver", func(t *testing.T) { + if ExpectedDefaultDriver() == "" { + t.Skipf("--expected-default-driver is unset, skipping test") + return + } + rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json")) + if err != nil { + t.Errorf("%s failed: %v", rr.Args, err) + } + var ps map[string][]config.Profile + err = json.Unmarshal(rr.Stdout.Bytes(), &ps) + if err != nil { + t.Errorf("%s failed: %v", rr.Args, err) } - // checking binaries downloaded (kubelet,kubeadm) - for _, bin := range constants.KubernetesReleaseBinaries { - fp := filepath.Join(localpath.MiniPath(), "cache", "linux", v, bin) - _, err := os.Stat(fp) - if err != nil { - t.Errorf("expected the file for binary exist at %q but got error %v", fp, err) + got := "" + for _, p := range ps["valid"] { + if p.Name == profile { + got = p.Config.Driver } } - // If we are on darwin/windows, check to make sure OS specific kubectl has been downloaded - // as well for the `minikube kubectl` command - if runtime.GOOS == "linux" { - return - } - binary := "kubectl" - if runtime.GOOS == "windows" { - binary = "kubectl.exe" - } - fp := filepath.Join(localpath.MiniPath(), "cache", runtime.GOOS, v, binary) - if _, err := os.Stat(fp); err != nil { - t.Errorf("expected the file for binary exist at %q but got error %v", fp, err) 
+ if got != ExpectedDefaultDriver() { + t.Errorf("got driver %q, expected %q\nstart output: %s", got, ExpectedDefaultDriver(), rrr.Output()) } }) - } - // Check that the profile we've created has the expected driver - t.Run("ExpectedDefaultDriver", func(t *testing.T) { - if ExpectedDefaultDriver() == "" { - t.Skipf("--expected-default-driver is unset, skipping test") - return - } - rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json")) - if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) - } - var ps map[string][]config.Profile - err = json.Unmarshal(rr.Stdout.Bytes(), &ps) - if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) - } - - got := "" - for _, p := range ps["valid"] { - if p.Name == profile { - got = p.Config.Driver + // This is a weird place to test profile deletion, but this test is serial, and we have a profile to delete! + t.Run("DeleteAll", func(t *testing.T) { + if !CanCleanup() { + t.Skip("skipping, as cleanup is disabled") } - } - - if got != ExpectedDefaultDriver() { - t.Errorf("got driver %q, expected %q\nstart output: %s", got, ExpectedDefaultDriver(), rrr.Output()) - } + rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "--all")) + if err != nil { + t.Errorf("%s failed: %v", rr.Args, err) + } + }) + // Delete should always succeed, even if previously partially or fully deleted. + t.Run("DeleteAlwaysSucceeds", func(t *testing.T) { + if !CanCleanup() { + t.Skip("skipping, as cleanup is disabled") + } + rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile)) + if err != nil { + t.Errorf("%s failed: %v", rr.Args, err) + } + }) }) - - // This is a weird place to test profile deletion, but this test is serial, and we have a profile to delete! 
- t.Run("DeleteAll", func(t *testing.T) { - if !CanCleanup() { - t.Skip("skipping, as cleanup is disabled") - } - rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "--all")) - if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) - } - }) - // Delete should always succeed, even if previously partially or fully deleted. - t.Run("DeleteAlwaysSucceeds", func(t *testing.T) { - if !CanCleanup() { - t.Skip("skipping, as cleanup is disabled") - } - rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile)) - if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) - } - }) - }) - + } } + func TestDownloadOnlyDocker(t *testing.T) { if !runningDockerDriver(StartArgs()) { t.Skip("this test only runs with the docker driver") From 66e7acd39a7c36fe295cb1d6ffcca4f5aab18bd8 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Sat, 21 Mar 2020 08:15:05 -0700 Subject: [PATCH 144/668] Fix merge regression, add localpath functions for certs --- pkg/minikube/bootstrapper/certs.go | 6 +++--- pkg/minikube/localpath/localpath.go | 15 +++++++++++++++ pkg/minikube/node/start.go | 6 +++--- 3 files changed, 21 insertions(+), 6 deletions(-) diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index e752b732a7..de938082cc 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -139,7 +139,7 @@ type CACerts struct { func generateSharedCACerts() (CACerts, error) { globalPath := localpath.MiniPath() cc := CACerts{ - caCert: filepath.Join(globalPath, "ca.crt"), + caCert: localpath.CACert(), caKey: filepath.Join(globalPath, "ca.key"), proxyCert: filepath.Join(globalPath, "proxy-client-ca.crt"), proxyKey: filepath.Join(globalPath, "proxy-client-ca.key"), @@ -214,8 +214,8 @@ func generateProfileCerts(k8s config.KubernetesConfig, n config.Node, ccs CACert caKeyPath string }{ { // Client cert - certPath: filepath.Join(profilePath, "client.crt"), - keyPath: filepath.Join(profilePath, "client.key"), 
+ certPath: localpath.ClientCert(k8s.ClusterName), + keyPath: localpath.ClientKey(k8s.ClusterName), subject: "minikube-user", ips: []net.IP{}, alternateNames: []string{}, diff --git a/pkg/minikube/localpath/localpath.go b/pkg/minikube/localpath/localpath.go index 3dd6d4158c..6bc9ef1239 100644 --- a/pkg/minikube/localpath/localpath.go +++ b/pkg/minikube/localpath/localpath.go @@ -59,6 +59,21 @@ func Profile(name string) string { return filepath.Join(MiniPath(), "profiles", name) } +// ClientCert returns client certificate path, used by kubeconfig +func ClientCert(name string) string { + return filepath.Join(Profile(name), "client.crt") +} + +// ClientKey returns client certificate path, used by kubeconfig +func ClientKey(name string) string { + return filepath.Join(Profile(name), "client.key") +} + +// CACert returns the minikube CA certificate shared between profiles +func CACert() string { + return filepath.Join(MiniPath(), "ca.crt") +} + // MachinePath returns the Minikube machine path of a machine func MachinePath(machine string, miniHome ...string) string { miniPath := MiniPath() diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index b46469837e..d69bad75f7 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -250,9 +250,9 @@ func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clu kcs := &kubeconfig.Settings{ ClusterName: clusterName, ClusterServerAddress: addr, - ClientCertificate: localpath.MakeMiniPath("client.crt"), - ClientKey: localpath.MakeMiniPath("client.key"), - CertificateAuthority: localpath.MakeMiniPath("ca.crt"), + ClientCertificate: localpath.ClientCert(cc.Name), + ClientKey: localpath.ClientKey(cc.Name), + CertificateAuthority: localpath.CACert(), KeepContext: viper.GetBool(keepContext), EmbedCerts: viper.GetBool(embedCerts), } From d75dc6a2294853be85cc6e78411704ab4acb9c6b Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Sat, 21 Mar 2020 10:22:45 -0700 Subject: [PATCH 
145/668] Remove broken ExpectedDefaultDriver test, fix call to PreloadExists --- hack/jenkins/common.sh | 1 - .../windows_integration_test_hyperv.ps1 | 2 +- .../windows_integration_test_virtualbox.ps1 | 2 +- test/integration/aaa_download_only_test.go | 32 +------------------ test/integration/main.go | 1 - 5 files changed, 3 insertions(+), 35 deletions(-) diff --git a/hack/jenkins/common.sh b/hack/jenkins/common.sh index 404b7aa4f9..8b908d3d73 100755 --- a/hack/jenkins/common.sh +++ b/hack/jenkins/common.sh @@ -286,7 +286,6 @@ fi touch "${TEST_OUT}" ${SUDO_PREFIX}${E2E_BIN} \ -minikube-start-args="--driver=${VM_DRIVER} ${EXTRA_START_ARGS}" \ - -expected-default-driver="${EXPECTED_DEFAULT_DRIVER}" \ -test.timeout=70m -test.v \ ${EXTRA_TEST_ARGS} \ -binary="${MINIKUBE_BIN}" 2>&1 | tee "${TEST_OUT}" diff --git a/hack/jenkins/windows_integration_test_hyperv.ps1 b/hack/jenkins/windows_integration_test_hyperv.ps1 index f1c4db8b9d..536c4e35cc 100644 --- a/hack/jenkins/windows_integration_test_hyperv.ps1 +++ b/hack/jenkins/windows_integration_test_hyperv.ps1 @@ -19,7 +19,7 @@ gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/testdata . 
./out/minikube-windows-amd64.exe delete -out/e2e-windows-amd64.exe --expected-default-driver=hyperv -minikube-start-args="--driver=hyperv --hyperv-virtual-switch=primary-virtual-switch" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=65m +out/e2e-windows-amd64.exe -minikube-start-args="--driver=hyperv --hyperv-virtual-switch=primary-virtual-switch" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=65m $env:result=$lastexitcode # If the last exit code was 0->success, x>0->error If($env:result -eq 0){$env:status="success"} diff --git a/hack/jenkins/windows_integration_test_virtualbox.ps1 b/hack/jenkins/windows_integration_test_virtualbox.ps1 index 6d9c7f318b..2f5957301f 100644 --- a/hack/jenkins/windows_integration_test_virtualbox.ps1 +++ b/hack/jenkins/windows_integration_test_virtualbox.ps1 @@ -19,7 +19,7 @@ gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/testdata . ./out/minikube-windows-amd64.exe delete -out/e2e-windows-amd64.exe -minikube-start-args="--driver=virtualbox" -expected-default-driver=hyperv -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=30m +out/e2e-windows-amd64.exe -minikube-start-args="--driver=virtualbox" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=30m $env:result=$lastexitcode # If the last exit code was 0->success, x>0->error If($env:result -eq 0){$env:status="success"} diff --git a/test/integration/aaa_download_only_test.go b/test/integration/aaa_download_only_test.go index 300c9323c1..9640212800 100644 --- a/test/integration/aaa_download_only_test.go +++ b/test/integration/aaa_download_only_test.go @@ -21,7 +21,6 @@ package integration import ( "context" "crypto/md5" - "encoding/json" "fmt" "io/ioutil" "os" @@ -33,7 +32,6 @@ import ( "time" "k8s.io/minikube/pkg/minikube/bootstrapper/images" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/localpath" @@ -73,7 +71,7 @@ func 
TestDownloadOnly(t *testing.T) { t.Errorf("%s failed: %v", args, err) } - if download.PreloadExists(v, "docker") { + if download.PreloadExists(v, r) { // Just make sure the tarball path exists if _, err := os.Stat(download.TarballPath(v)); err != nil { t.Errorf("preloaded tarball path doesn't exist: %v", err) @@ -123,34 +121,6 @@ func TestDownloadOnly(t *testing.T) { }) } - // Check that the profile we've created has the expected driver - t.Run("ExpectedDefaultDriver", func(t *testing.T) { - if ExpectedDefaultDriver() == "" { - t.Skipf("--expected-default-driver is unset, skipping test") - return - } - rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json")) - if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) - } - var ps map[string][]config.Profile - err = json.Unmarshal(rr.Stdout.Bytes(), &ps) - if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) - } - - got := "" - for _, p := range ps["valid"] { - if p.Name == profile { - got = p.Config.Driver - } - } - - if got != ExpectedDefaultDriver() { - t.Errorf("got driver %q, expected %q\nstart output: %s", got, ExpectedDefaultDriver(), rrr.Output()) - } - }) - // This is a weird place to test profile deletion, but this test is serial, and we have a profile to delete! 
t.Run("DeleteAll", func(t *testing.T) { if !CanCleanup() { diff --git a/test/integration/main.go b/test/integration/main.go index 33c5e09618..54a644518c 100644 --- a/test/integration/main.go +++ b/test/integration/main.go @@ -27,7 +27,6 @@ import ( // General configuration: used to set the VM Driver var startArgs = flag.String("minikube-start-args", "", "Arguments to pass to minikube start") -var defaultDriver = flag.String("expected-default-driver", "", "Expected default driver") // Flags for faster local integration testing var forceProfile = flag.String("profile", "", "force tests to run against a particular profile") From e6948b90b4aa9a7047cb4be2303d7425c8938c8c Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Sat, 21 Mar 2020 10:47:08 -0700 Subject: [PATCH 146/668] Trim crio prefix, dedup results --- test/integration/start_stop_delete_test.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/test/integration/start_stop_delete_test.go b/test/integration/start_stop_delete_test.go index c891309a69..38ca1d9b9a 100644 --- a/test/integration/start_stop_delete_test.go +++ b/test/integration/start_stop_delete_test.go @@ -260,12 +260,14 @@ func testPulledImages(ctx context.Context, t *testing.T, profile string, version if err != nil { t.Errorf("images unmarshal: %v", err) } - gotImages := []string{} + found := map[string]bool{} for _, img := range jv["images"] { for _, i := range img.Tags { + // Remove container-specific prefixes for naming consistency + i = strings.TrimPrefix(i, "docker.io/") + i = strings.TrimPrefix(i, "localhost/") if defaultImage(i) { - // Remove docker.io for naming consistency between container runtimes - gotImages = append(gotImages, strings.TrimPrefix(i, "docker.io/")) + found[i] = true } else { t.Logf("Found non-minikube image: %s", i) } @@ -275,6 +277,10 @@ func testPulledImages(ctx context.Context, t *testing.T, profile string, version if err != nil { t.Errorf("kubeadm images: %v", version) } + gotImages := 
[]string{} + for k := range found { + gotImages = append(gotImages, k) + } sort.Strings(want) sort.Strings(gotImages) if diff := cmp.Diff(want, gotImages); diff != "" { From 17e6d84c3f5bf34537fc7153d2e5a0d9a0467a7e Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Sat, 21 Mar 2020 10:47:58 -0700 Subject: [PATCH 147/668] Remove unused code --- test/integration/main.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/test/integration/main.go b/test/integration/main.go index 54a644518c..3c159a6e42 100644 --- a/test/integration/main.go +++ b/test/integration/main.go @@ -68,11 +68,6 @@ func HyperVDriver() bool { return strings.Contains(*startArgs, "--driver=hyperv") } -// ExpectedDefaultDriver returns the expected default driver, if any -func ExpectedDefaultDriver() string { - return *defaultDriver -} - // CanCleanup returns if cleanup is allowed func CanCleanup() bool { return *cleanup From 314692a03825f9eb9e49702ff16f22ef3693d596 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Sat, 21 Mar 2020 11:09:59 -0700 Subject: [PATCH 148/668] Version bump: v1.9.0-beta.2 --- CHANGELOG.md | 36 ++++++++++++++++++++++++++++++++++++ Makefile | 2 +- 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 835ccf9552..baebf87467 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,41 @@ # Release Notes +## Version 1.9.0-beta.2 - 2020-03-21 + +New features & improvements + +* Add experimental multi-node support 🎉 [#6787](https://github.com/kubernetes/minikube/pull/6787) +* Skip kubeadm if cluster is running & properly configured [#7124](https://github.com/kubernetes/minikube/pull/7124) +* Make certificates per-profile and consistent until IP or names change [#7125](https://github.com/kubernetes/minikube/pull/7125) +* bumpup helm-tiller v2.16.1 → v2.16.3 [#7130](https://github.com/kubernetes/minikube/pull/7130) +* Update Nvidia GPU plugin [#7132](https://github.com/kubernetes/minikube/pull/7132) +* bumpup istio & istio-provisoner 
addon 1.4.0 → 1.5.0 [#7120](https://github.com/kubernetes/minikube/pull/7120) +* New addon: registry-aliases [#6657](https://github.com/kubernetes/minikube/pull/6657) +* Upgrade buildroot minor version [#7101](https://github.com/kubernetes/minikube/pull/7101) + +Bugfixes + +* Prevent crash if namespace or service doesn't exist [#5844](https://github.com/kubernetes/minikube/pull/5844) +* Only run deleteHosts if running a VM [#7110](https://github.com/kubernetes/minikube/pull/7110) +* provisioner: only reload docker if necessary, don't install curl [#7115](https://github.com/kubernetes/minikube/pull/7115) +* Improve error when docker-env is used with non-docker runtime [#7112](https://github.com/kubernetes/minikube/pull/7112) +* Add warning if both vm-driver and driver are specified [#7109](https://github.com/kubernetes/minikube/pull/7109) + +Thank you to our contributors: + +- Anders F Björklund +- Iso Kenta +- Kamesh Sampath +- Kenta Iso +- Priya Wadhwa +- Sharif Elgamal +- Tacio Costa +- Thomas Stromberg +- Thomas Strömberg +- Zhongcheng Lao +- rajula96reddy +- sayboras + ## Version 1.9.0-beta.1 - 2020-03-18 New features diff --git a/Makefile b/Makefile index f1256f8eb5..f7b45a07ed 100755 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ # Bump these on release - and please check ISO_VERSION for correctness. 
VERSION_MAJOR ?= 1 VERSION_MINOR ?= 9 -VERSION_BUILD ?= 0-beta.1 +VERSION_BUILD ?= 0-beta.2 RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD) VERSION ?= v$(RAW_VERSION) From ae9fd2a1e6b86926c22223d9773a7145793b35a6 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Sat, 21 Mar 2020 11:10:48 -0700 Subject: [PATCH 149/668] Remove dupe --- CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index baebf87467..e0c8f2efe0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,7 +30,6 @@ Thank you to our contributors: - Priya Wadhwa - Sharif Elgamal - Tacio Costa -- Thomas Stromberg - Thomas Strömberg - Zhongcheng Lao - rajula96reddy From 151db5ffe63dd84b726a02216704ff1018475031 Mon Sep 17 00:00:00 2001 From: Zhongcheng Lao Date: Sat, 21 Mar 2020 19:27:38 +0800 Subject: [PATCH 150/668] Use hostname as node name for 'none' driver --- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 2 +- pkg/minikube/bootstrapper/bsutil/kubelet.go | 5 +++-- pkg/minikube/driver/driver.go | 10 ++++++++++ 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 8b675ae644..6fc923c855 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -94,7 +94,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana EtcdDataDir: EtcdDataDir(), ClusterName: cc.Name, //kubeadm uses NodeName as the --hostname-override parameter, so this needs to be the name of the machine - NodeName: driver.MachineName(cc, n), + NodeName: driver.KubeNodeName(cc, n), CRISocket: r.SocketPath(), ImageRepository: k8s.ImageRepository, ComponentOptions: componentOpts, diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet.go b/pkg/minikube/bootstrapper/bsutil/kubelet.go index ce161b41da..3f22f8fbc7 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet.go 
@@ -60,8 +60,9 @@ func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manage if _, ok := extraOpts["node-ip"]; !ok { extraOpts["node-ip"] = cp.IP } - if nc.Name != "" { - extraOpts["hostname-override"] = driver.MachineName(mc, nc) + nodeName := driver.KubeNodeName(mc, nc) + if nodeName != "" { + extraOpts["hostname-override"] = nodeName } pauseImage := images.Pause(version, k8s.ImageRepository) diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index b6106474d8..54f379439b 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -237,6 +237,16 @@ func MachineName(cc config.ClusterConfig, n config.Node) string { return fmt.Sprintf("%s---%s", cc.Name, n.Name) } +// KubeNodeName returns the node name registered in Kubernetes +func KubeNodeName(cc config.ClusterConfig, n config.Node) string { + if cc.Driver == None { + // Always use hostname for "none" driver + hostname, _ := os.Hostname() + return hostname + } + return MachineName(cc, n) +} + // ClusterNameFromMachine retrieves the cluster name embedded in the machine name func ClusterNameFromMachine(name string) (string, string) { if strings.Contains(name, "---") { From ba6dbc57c233cfcdba5f18f43dc0d813854c7ccf Mon Sep 17 00:00:00 2001 From: Prasad Katti Date: Sat, 21 Mar 2020 12:18:50 -0700 Subject: [PATCH 151/668] Update the call to logProblems in node/start.go --- pkg/minikube/node/start.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index b46469837e..21a65d15ab 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -112,7 +112,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo bs = setupKubeAdm(machineAPI, cc, n) err = bs.StartCluster(cc) if err != nil { - exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner)) + exit.WithLogEntries("Error starting cluster", err, 
logs.FindProblems(cr, bs, cc, mRunner)) } } else { bs, err = cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, n) From a59750627e71a8e7190b245ead5599c2bb2ca8aa Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Sat, 21 Mar 2020 15:51:31 -0700 Subject: [PATCH 152/668] Improve stale endpoint detection and resolution in Kubernetes configs --- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 2 - pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 62 ++++++++++++++------ 2 files changed, 44 insertions(+), 20 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 8b675ae644..67187d6109 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -102,8 +102,6 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana NoTaintMaster: false, // That does not work with k8s 1.12+ DNSDomain: k8s.DNSDomain, NodeIP: n.IP, - // NOTE: If set to an specific VM IP, things may break if the IP changes on host restart - // For multi-node, we may need to figure out an alternate strategy, like DNS or hosts files ControlPlaneAddress: cp.IP, } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index e859511099..a6c1bbe3dd 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -110,7 +110,7 @@ func (k *Bootstrapper) GetAPIServerStatus(ip net.IP, port int) (string, error) { } // LogCommands returns a map of log type to a command which will display that log. 
-func (k *Bootstrapper) LogCommands(o bootstrapper.LogOptions) map[string]string { +func (k *Bootstrapper) LogCommands(cfg config.ClusterConfig, o bootstrapper.LogOptions) map[string]string { var kubelet strings.Builder kubelet.WriteString("sudo journalctl -u kubelet") if o.Lines > 0 { @@ -128,9 +128,15 @@ func (k *Bootstrapper) LogCommands(o bootstrapper.LogOptions) map[string]string if o.Lines > 0 { dmesg.WriteString(fmt.Sprintf(" | tail -n %d", o.Lines)) } + + describeNodes := fmt.Sprintf("sudo %s describe node -A --kubeconfig=%s", + path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl"), + path.Join(vmpath.GuestPersistentDir, "kubeconfig")) + return map[string]string{ - "kubelet": kubelet.String(), - "dmesg": dmesg.String(), + "kubelet": kubelet.String(), + "dmesg": dmesg.String(), + "describe nodes": describeNodes, } } @@ -151,6 +157,30 @@ func (k *Bootstrapper) createCompatSymlinks() error { return nil } +// clearStaleConfigs clears configurations which may have stale IP addresses +func (k *Bootstrapper) clearStaleConfigs(cfg config.ClusterConfig) error { + cp, err := config.PrimaryControlPlane(&cfg) + if err != nil { + return err + } + + paths := []string{ + "/etc/kubernetes/admin.conf", + "/etc/kubernetes/kubelet.conf", + "/etc/kubernetes/controller-manager.conf", + "/etc/kubernetes/scheduler.conf", + } + + endpoint := fmt.Sprintf("https://%s", net.JoinHostPort(cp.IP, strconv.Itoa(cp.Port))) + for _, path := range paths { + _, err := k.c.RunCmd(exec.Command("sudo", "/bin/bash", "-c", fmt.Sprintf("grep %s %s || sudo rm -f %s", endpoint, path, path))) + if err != nil { + return err + } + } + return nil +} + // StartCluster starts the cluster func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { err := bsutil.ExistingConfig(k.c) @@ -202,10 +232,8 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { } - // Remove the previous kubeadm kubeconfig as the IP may have changed - _, err = 
k.c.RunCmd(exec.Command("sudo", "rm", "-f", "/etc/kubernetes/admin.conf")) - if err != nil { - return errors.Wrap(err, "deleting admin.conf") + if err := k.clearStaleConfigs(cfg); err != nil { + return errors.Wrap(err, "clearing stale configs") } conf := bsutil.KubeadmYamlPath @@ -287,7 +315,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time return err } - if err := kverify.WaitForAPIServerProcess(cr, k, k.c, start, timeout); err != nil { + if err := kverify.WaitForAPIServerProcess(cr, k, cfg, k.c, start, timeout); err != nil { return err } @@ -296,7 +324,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time return err } - if err := kverify.WaitForHealthyAPIServer(cr, k, k.c, start, ip, port, timeout); err != nil { + if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, start, ip, port, timeout); err != nil { return err } @@ -305,7 +333,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time return errors.Wrap(err, "get k8s client") } - if err := kverify.WaitForSystemPods(cr, k, k.c, c, start, timeout); err != nil { + if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, c, start, timeout); err != nil { return errors.Wrap(err, "waiting for system pods") } return nil @@ -313,8 +341,8 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time // needsReset returns whether or not the cluster needs to be reconfigured func (k *Bootstrapper) needsReset(conf string, ip string, port int, client *kubernetes.Clientset) bool { - if _, err := k.c.RunCmd(exec.Command("sudo", "diff", "-u", conf, conf+".new")); err != nil { - glog.Infof("needs reset: configs differ") + if rr, err := k.c.RunCmd(exec.Command("sudo", "diff", "-u", conf, conf+".new")); err != nil { + glog.Infof("needs reset: configs differ:\n%s", rr.Output()) return true } @@ -378,10 +406,8 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { return nil } - // Remove the 
previous kubeadm kubeconfig as the IP may have changed - _, err = k.c.RunCmd(exec.Command("sudo", "rm", "-f", "/etc/kubernetes/admin.conf")) - if err != nil { - return errors.Wrap(err, "deleting admin.conf") + if err := k.clearStaleConfigs(cfg); err != nil { + return errors.Wrap(err, "clearing stale configs") } if _, err := k.c.RunCmd(exec.Command("sudo", "mv", conf+".new", conf)); err != nil { @@ -411,11 +437,11 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { } // We must ensure that the apiserver is healthy before proceeding - if err := kverify.WaitForAPIServerProcess(cr, k, k.c, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { + if err := kverify.WaitForAPIServerProcess(cr, k, cfg, k.c, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { return errors.Wrap(err, "apiserver healthz") } - if err := kverify.WaitForSystemPods(cr, k, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { + if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { return errors.Wrap(err, "system pods") } From f76cdea82a94707c1f8cea24f2b2bd6e3b1380fa Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Sat, 21 Mar 2020 16:05:00 -0700 Subject: [PATCH 153/668] Run gofmt --- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 67187d6109..b2af8d8a90 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -94,14 +94,14 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana EtcdDataDir: EtcdDataDir(), ClusterName: cc.Name, //kubeadm uses NodeName as the --hostname-override parameter, so this needs to be the name of the machine - NodeName: driver.MachineName(cc, n), - CRISocket: r.SocketPath(), - ImageRepository: 
k8s.ImageRepository, - ComponentOptions: componentOpts, - FeatureArgs: kubeadmFeatureArgs, - NoTaintMaster: false, // That does not work with k8s 1.12+ - DNSDomain: k8s.DNSDomain, - NodeIP: n.IP, + NodeName: driver.MachineName(cc, n), + CRISocket: r.SocketPath(), + ImageRepository: k8s.ImageRepository, + ComponentOptions: componentOpts, + FeatureArgs: kubeadmFeatureArgs, + NoTaintMaster: false, // That does not work with k8s 1.12+ + DNSDomain: k8s.DNSDomain, + NodeIP: n.IP, ControlPlaneAddress: cp.IP, } From 373364d6257c0462675eafb074e10037a54a00fc Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Sat, 21 Mar 2020 18:47:26 -0700 Subject: [PATCH 154/668] Update to include last minute changes --- CHANGELOG.md | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e0c8f2efe0..fe559157a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,22 +4,22 @@ New features & improvements -* Add experimental multi-node support 🎉 [#6787](https://github.com/kubernetes/minikube/pull/6787) -* Skip kubeadm if cluster is running & properly configured [#7124](https://github.com/kubernetes/minikube/pull/7124) -* Make certificates per-profile and consistent until IP or names change [#7125](https://github.com/kubernetes/minikube/pull/7125) +* 🎉 Experimental multi-node support 🎊 [#6787](https://github.com/kubernetes/minikube/pull/6787) +* Add kubectl desc nodes to minikube logs [#7105](https://github.com/kubernetes/minikube/pull/7105) * bumpup helm-tiller v2.16.1 → v2.16.3 [#7130](https://github.com/kubernetes/minikube/pull/7130) * Update Nvidia GPU plugin [#7132](https://github.com/kubernetes/minikube/pull/7132) * bumpup istio & istio-provisoner addon 1.4.0 → 1.5.0 [#7120](https://github.com/kubernetes/minikube/pull/7120) * New addon: registry-aliases [#6657](https://github.com/kubernetes/minikube/pull/6657) * Upgrade buildroot minor version [#7101](https://github.com/kubernetes/minikube/pull/7101) +* Skip kubeadm if cluster 
is running & properly configured [#7124](https://github.com/kubernetes/minikube/pull/7124) +* Make certificates per-profile and consistent until IP or names change [#7125](https://github.com/kubernetes/minikube/pull/7125) Bugfixes -* Prevent crash if namespace or service doesn't exist [#5844](https://github.com/kubernetes/minikube/pull/5844) -* Only run deleteHosts if running a VM [#7110](https://github.com/kubernetes/minikube/pull/7110) -* provisioner: only reload docker if necessary, don't install curl [#7115](https://github.com/kubernetes/minikube/pull/7115) -* Improve error when docker-env is used with non-docker runtime [#7112](https://github.com/kubernetes/minikube/pull/7112) +* Prevent minikube from crashing if namespace or service doesn't exist [#5844](https://github.com/kubernetes/minikube/pull/5844) * Add warning if both vm-driver and driver are specified [#7109](https://github.com/kubernetes/minikube/pull/7109) +* Improve error when docker-env is used with non-docker runtime [#7112](https://github.com/kubernetes/minikube/pull/7112) +* provisioner: only reload docker if necessary, don't install curl [#7115](https://github.com/kubernetes/minikube/pull/7115) Thank you to our contributors: @@ -27,6 +27,7 @@ Thank you to our contributors: - Iso Kenta - Kamesh Sampath - Kenta Iso +- Prasad Katti - Priya Wadhwa - Sharif Elgamal - Tacio Costa From 6ff82c6246d2a7578df1fbe9425869e8fffae002 Mon Sep 17 00:00:00 2001 From: Zhongcheng Lao Date: Sun, 22 Mar 2020 14:31:37 +0800 Subject: [PATCH 155/668] Make node name registered in K8s part of Node type --- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 3 +-- pkg/minikube/bootstrapper/bsutil/kubelet.go | 3 +-- pkg/minikube/config/types.go | 12 ++++++++++++ pkg/minikube/driver/driver.go | 10 ---------- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index bcc363c9da..55cec06303 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -29,7 +29,6 @@ import ( "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" - "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/vmpath" "k8s.io/minikube/pkg/util" ) @@ -94,7 +93,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana EtcdDataDir: EtcdDataDir(), ClusterName: cc.Name, //kubeadm uses NodeName as the --hostname-override parameter, so this needs to be the name of the machine - NodeName: driver.KubeNodeName(cc, n), + NodeName: n.InternalName(), CRISocket: r.SocketPath(), ImageRepository: k8s.ImageRepository, ComponentOptions: componentOpts, diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet.go b/pkg/minikube/bootstrapper/bsutil/kubelet.go index 3f22f8fbc7..06e52d0ed2 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet.go @@ -26,7 +26,6 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/cruntime" - "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/util" ) @@ -60,7 +59,7 @@ func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manage if _, ok := extraOpts["node-ip"]; !ok { extraOpts["node-ip"] = cp.IP } - nodeName := driver.KubeNodeName(mc, nc) + nodeName := nc.InternalName() if nodeName != "" { extraOpts["hostname-override"] = nodeName } diff --git a/pkg/minikube/config/types.go b/pkg/minikube/config/types.go index 97c2a13039..af1dc66773 100644 --- a/pkg/minikube/config/types.go +++ b/pkg/minikube/config/types.go @@ -18,6 +18,7 @@ package config import ( "net" + "os" "github.com/blang/semver" ) @@ -102,6 +103,17 @@ type Node struct { Worker bool } +// returns the name to be registered kubernetes +func (n Node) InternalName() string { + if n.Name == "" { + // Always use hostname for 
"none" driver + hostname, _ := os.Hostname() + return hostname + } + + return n.Name +} + // VersionedExtraOption holds information on flags to apply to a specific range // of versions type VersionedExtraOption struct { diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index 54f379439b..b6106474d8 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -237,16 +237,6 @@ func MachineName(cc config.ClusterConfig, n config.Node) string { return fmt.Sprintf("%s---%s", cc.Name, n.Name) } -// KubeNodeName returns the node name registered in Kubernetes -func KubeNodeName(cc config.ClusterConfig, n config.Node) string { - if cc.Driver == None { - // Always use hostname for "none" driver - hostname, _ := os.Hostname() - return hostname - } - return MachineName(cc, n) -} - // ClusterNameFromMachine retrieves the cluster name embedded in the machine name func ClusterNameFromMachine(name string) (string, string) { if strings.Contains(name, "---") { From 15b1647b42c1bcbf8365115bd8b7bfb8f772d7d3 Mon Sep 17 00:00:00 2001 From: Zhongcheng Lao Date: Sun, 22 Mar 2020 15:06:38 +0800 Subject: [PATCH 156/668] Revert "Make node name registered in K8s part of Node type" This reverts commit 6ff82c6246d2a7578df1fbe9425869e8fffae002. 
--- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 3 ++- pkg/minikube/bootstrapper/bsutil/kubelet.go | 3 ++- pkg/minikube/config/types.go | 12 ------------ pkg/minikube/driver/driver.go | 10 ++++++++++ 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 55cec06303..bcc363c9da 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -29,6 +29,7 @@ import ( "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/vmpath" "k8s.io/minikube/pkg/util" ) @@ -93,7 +94,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana EtcdDataDir: EtcdDataDir(), ClusterName: cc.Name, //kubeadm uses NodeName as the --hostname-override parameter, so this needs to be the name of the machine - NodeName: n.InternalName(), + NodeName: driver.KubeNodeName(cc, n), CRISocket: r.SocketPath(), ImageRepository: k8s.ImageRepository, ComponentOptions: componentOpts, diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet.go b/pkg/minikube/bootstrapper/bsutil/kubelet.go index 06e52d0ed2..3f22f8fbc7 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet.go @@ -26,6 +26,7 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/cruntime" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/util" ) @@ -59,7 +60,7 @@ func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manage if _, ok := extraOpts["node-ip"]; !ok { extraOpts["node-ip"] = cp.IP } - nodeName := nc.InternalName() + nodeName := driver.KubeNodeName(mc, nc) if nodeName != "" { extraOpts["hostname-override"] = nodeName } diff --git a/pkg/minikube/config/types.go 
b/pkg/minikube/config/types.go index af1dc66773..97c2a13039 100644 --- a/pkg/minikube/config/types.go +++ b/pkg/minikube/config/types.go @@ -18,7 +18,6 @@ package config import ( "net" - "os" "github.com/blang/semver" ) @@ -103,17 +102,6 @@ type Node struct { Worker bool } -// returns the name to be registered kubernetes -func (n Node) InternalName() string { - if n.Name == "" { - // Always use hostname for "none" driver - hostname, _ := os.Hostname() - return hostname - } - - return n.Name -} - // VersionedExtraOption holds information on flags to apply to a specific range // of versions type VersionedExtraOption struct { diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index b6106474d8..54f379439b 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -237,6 +237,16 @@ func MachineName(cc config.ClusterConfig, n config.Node) string { return fmt.Sprintf("%s---%s", cc.Name, n.Name) } +// KubeNodeName returns the node name registered in Kubernetes +func KubeNodeName(cc config.ClusterConfig, n config.Node) string { + if cc.Driver == None { + // Always use hostname for "none" driver + hostname, _ := os.Hostname() + return hostname + } + return MachineName(cc, n) +} + // ClusterNameFromMachine retrieves the cluster name embedded in the machine name func ClusterNameFromMachine(name string) (string, string) { if strings.Contains(name, "---") { From 43b5f7c86d79d33b8530a104079371ebde8324da Mon Sep 17 00:00:00 2001 From: Zhongcheng Lao Date: Sun, 22 Mar 2020 15:11:03 +0800 Subject: [PATCH 157/668] Move KubeNodeName out of driver package --- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 3 +-- pkg/minikube/bootstrapper/bsutil/kubelet.go | 13 ++++++++++++- pkg/minikube/driver/driver.go | 10 ---------- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index bcc363c9da..46104c1d44 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -29,7 +29,6 @@ import ( "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" - "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/vmpath" "k8s.io/minikube/pkg/util" ) @@ -94,7 +93,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana EtcdDataDir: EtcdDataDir(), ClusterName: cc.Name, //kubeadm uses NodeName as the --hostname-override parameter, so this needs to be the name of the machine - NodeName: driver.KubeNodeName(cc, n), + NodeName: KubeNodeName(cc, n), CRISocket: r.SocketPath(), ImageRepository: k8s.ImageRepository, ComponentOptions: componentOpts, diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet.go b/pkg/minikube/bootstrapper/bsutil/kubelet.go index 3f22f8fbc7..c2180838a3 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet.go @@ -19,6 +19,7 @@ package bsutil import ( "bytes" + "os" "path" "github.com/pkg/errors" @@ -60,7 +61,7 @@ func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manage if _, ok := extraOpts["node-ip"]; !ok { extraOpts["node-ip"] = cp.IP } - nodeName := driver.KubeNodeName(mc, nc) + nodeName := KubeNodeName(mc, nc) if nodeName != "" { extraOpts["hostname-override"] = nodeName } @@ -117,3 +118,13 @@ func NewKubeletService(cfg config.KubernetesConfig) ([]byte, error) { } return b.Bytes(), nil } + +// KubeNodeName returns the node name registered in Kubernetes +func KubeNodeName(cc config.ClusterConfig, n config.Node) string { + if cc.Driver == driver.None { + // Always use hostname for "none" driver + hostname, _ := os.Hostname() + return hostname + } + return driver.MachineName(cc, n) +} diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index 54f379439b..b6106474d8 100644 --- a/pkg/minikube/driver/driver.go +++ 
b/pkg/minikube/driver/driver.go @@ -237,16 +237,6 @@ func MachineName(cc config.ClusterConfig, n config.Node) string { return fmt.Sprintf("%s---%s", cc.Name, n.Name) } -// KubeNodeName returns the node name registered in Kubernetes -func KubeNodeName(cc config.ClusterConfig, n config.Node) string { - if cc.Driver == None { - // Always use hostname for "none" driver - hostname, _ := os.Hostname() - return hostname - } - return MachineName(cc, n) -} - // ClusterNameFromMachine retrieves the cluster name embedded in the machine name func ClusterNameFromMachine(name string) (string, string) { if strings.Contains(name, "---") { From 6ee7e6fa6736d0f514da3c00341429a34b27b24f Mon Sep 17 00:00:00 2001 From: vikkyomkar Date: Sun, 22 Mar 2020 19:25:03 +0530 Subject: [PATCH 158/668] updated as per suggestion --- pkg/minikube/driver/driver.go | 15 ++-------- pkg/minikube/registry/global.go | 44 ++++++++++++++++++++++++++-- pkg/minikube/registry/global_test.go | 2 +- 3 files changed, 46 insertions(+), 15 deletions(-) diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index b790af5c9b..efbfac95cd 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -165,22 +165,13 @@ func FlagDefaults(name string) FlagHints { // Choices returns a list of drivers which are possible on this system func Choices(vm bool) []registry.DriverState { - var drivers []registry.DriverState - options := registry.Available() - if vm { - for _, ds := range options { - if IsVM(ds.Name) { - drivers = append(drivers, ds) - } - } - } else { - drivers = options - } + options := registry.Available(vm) + // Descending priority for predictability and appearance sort.Slice(options, func(i, j int) bool { return options[i].Priority > options[j].Priority }) - return drivers + return options } // Suggest returns a suggested driver from a set of options diff --git a/pkg/minikube/registry/global.go b/pkg/minikube/registry/global.go index 301f61cb9f..16ede79a27 100644 --- 
a/pkg/minikube/registry/global.go +++ b/pkg/minikube/registry/global.go @@ -24,6 +24,40 @@ import ( "github.com/golang/glog" ) +const ( + // Podman is Kubernetes in container using podman driver + Podman = "podman" + // Docker is Kubernetes in container using docker driver + Docker = "docker" + // Mock driver + Mock = "mock" + // None driver + None = "none" +) + +// IsKIC checks if the driver is a kubernetes in container +func IsKIC(name string) bool { + return name == Docker || name == Podman +} + +// IsMock checks if the driver is a mock +func IsMock(name string) bool { + return name == Mock +} + +// IsVM checks if the driver is a VM +func IsVM(name string) bool { + if IsKIC(name) || IsMock(name) || BareMetal(name) { + return false + } + return true +} + +// BareMetal returns if this driver is unisolated +func BareMetal(name string) bool { + return name == None || name == Mock +} + var ( // globalRegistry is a globally accessible driver registry globalRegistry = newRegistry() @@ -59,7 +93,7 @@ func Driver(name string) DriverDef { } // Available returns a list of available drivers in the global registry -func Available() []DriverState { +func Available(vm bool) []DriverState { sts := []DriverState{} glog.Infof("Querying for installed drivers using PATH=%s", os.Getenv("PATH")) @@ -76,7 +110,13 @@ func Available() []DriverState { priority = Unhealthy } - sts = append(sts, DriverState{Name: d.Name, Priority: priority, State: s}) + if vm { + if IsVM(d.Name) { + sts = append(sts, DriverState{Name: d.Name, Priority: priority, State: s}) + } + } else { + sts = append(sts, DriverState{Name: d.Name, Priority: priority, State: s}) + } } // Descending priority for predictability diff --git a/pkg/minikube/registry/global_test.go b/pkg/minikube/registry/global_test.go index dbc76b6d51..9cb01a1e35 100644 --- a/pkg/minikube/registry/global_test.go +++ b/pkg/minikube/registry/global_test.go @@ -102,7 +102,7 @@ func TestGlobalAvailable(t *testing.T) { }, } - if diff := 
cmp.Diff(Available(), expected); diff != "" { + if diff := cmp.Diff(Available(false), expected); diff != "" { t.Errorf("available mismatch (-want +got):\n%s", diff) } } From 566196c9d0db8a4524cda9f7e72e77c8e87ff973 Mon Sep 17 00:00:00 2001 From: Zhongcheng Lao Date: Mon, 23 Mar 2020 00:06:05 +0800 Subject: [PATCH 159/668] Do not run GPU plugin under priviledge mode --- deploy/addons/gpu/nvidia-gpu-device-plugin.yaml | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/deploy/addons/gpu/nvidia-gpu-device-plugin.yaml b/deploy/addons/gpu/nvidia-gpu-device-plugin.yaml index d4ee2ead9c..e895e5394a 100644 --- a/deploy/addons/gpu/nvidia-gpu-device-plugin.yaml +++ b/deploy/addons/gpu/nvidia-gpu-device-plugin.yaml @@ -42,9 +42,6 @@ spec: - name: device-plugin hostPath: path: /var/lib/kubelet/device-plugins - - name: dev - hostPath: - path: /dev containers: - image: "nvidia/k8s-device-plugin:1.0.0-beta4" command: ["/usr/bin/nvidia-device-plugin", "-logtostderr"] @@ -54,11 +51,11 @@ spec: cpu: 50m memory: 10Mi securityContext: - privileged: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] volumeMounts: - name: device-plugin mountPath: /var/lib/kubelet/device-plugins - - name: dev - mountPath: /dev updateStrategy: type: RollingUpdate From 2b87e766c6114bb107cee48c0950c87041704b6e Mon Sep 17 00:00:00 2001 From: Tom Date: Sun, 22 Mar 2020 13:00:14 -0700 Subject: [PATCH 160/668] Update nfsexports version --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index a703f57861..be48dd84f6 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/hooklift/iso9660 v0.0.0-20170318115843-1cf07e5970d8 github.com/imdario/mergo v0.3.8 // indirect github.com/intel-go/cpuid v0.0.0-20181003105527-1a4a6f06a1c6 // indirect - github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345 + github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f 
github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d // indirect github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 // indirect diff --git a/go.sum b/go.sum index 8a4fe67c98..875768f6c7 100644 --- a/go.sum +++ b/go.sum @@ -423,6 +423,8 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345 h1:XP1VL9iOZu4yz/rq8zj+yvB23XEY5erXRzp8JYmkWu0= github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU= +github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f h1:tL0xH80QVHQOde6Qqdohv6PewABH8l8N9pywZtuojJ0= +github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= From b47cd9c0cb2f098df6c717483b02664aa2f9a59e Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Mon, 23 Mar 2020 08:11:45 -0700 Subject: [PATCH 161/668] Consistently detect old driver when existing.Driver is unset --- cmd/minikube/cmd/start.go | 57 +++++++++++++++++++++++---------------- 1 file changed, 34 insertions(+), 23 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 559407c04f..f758cdbc74 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -466,8 +466,9 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState { driver.SetLibvirtURI(viper.GetString(kvmQemuURI)) // By default, the driver is whatever we used last time 
- if existing != nil && existing.Driver != "" { - ds := driver.Status(existing.Driver) + if existing != nil { + old := hostDriver(existing) + ds := driver.Status(old) out.T(out.Sparkle, `Using the {{.driver}} driver based on existing profile`, out.V{"driver": ds.String()}) return ds } @@ -519,46 +520,55 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState { return pick } +// hostDriver returns the true driver used without relying on config fields +func hostDriver(existing *config.ClusterConfig) string { + api, err := machine.NewAPIClient() + if err != nil { + glog.Warningf("selectDriver NewAPIClient: %v", err) + return existing.Driver + } + + cp, err := config.PrimaryControlPlane(existing) + if err != nil { + glog.Warningf("Unable to get control plane from existing config: %v", err) + return existing.Driver + } + machineName := driver.MachineName(*existing, cp) + h, err := api.Load(machineName) + if err != nil { + glog.Warningf("selectDriver api.Load: %v", err) + return existing.Driver + } + + return h.Driver.DriverName() +} + // validateSpecifiedDriver makes sure that if a user has passed in a driver // it matches the existing cluster if there is one func validateSpecifiedDriver(existing *config.ClusterConfig) { if existing == nil { return } - old := existing.Driver + var requested string if d := viper.GetString("driver"); d != "" { requested = d } else if d := viper.GetString("vm-driver"); d != "" { requested = d } + // Neither --vm-driver or --driver was specified if requested == "" { return } - if old == requested { - return - } - api, err := machine.NewAPIClient() - if err != nil { - glog.Warningf("selectDriver NewAPIClient: %v", err) - return - } - - cp, err := config.PrimaryControlPlane(existing) - if err != nil { - exit.WithError("Error getting primary cp", err) - } - machineName := driver.MachineName(*existing, cp) - h, err := api.Load(machineName) - if err != nil { - glog.Warningf("selectDriver api.Load: %v", err) + old := 
hostDriver(existing) + if requested == old { return } out.ErrT(out.Conflict, `The existing "{{.profile_name}}" VM was created using the "{{.old_driver}}" driver, and is incompatible with the "{{.driver}}" driver.`, - out.V{"profile_name": machineName, "driver": requested, "old_driver": h.Driver.DriverName()}) + out.V{"profile_name": existing.Name, "driver": requested, "old_driver": old}) out.ErrT(out.Workaround, `To proceed, either: @@ -567,7 +577,7 @@ func validateSpecifiedDriver(existing *config.ClusterConfig) { * or * 2) Start the existing "{{.profile_name}}" cluster using: '{{.command}} start --driver={{.old_driver}}' -`, out.V{"command": minikubeCmd(), "old_driver": h.Driver.DriverName(), "profile_name": machineName}) +`, out.V{"command": minikubeCmd(), "old_driver": old, "profile_name": existing.Name}) exit.WithCodeT(exit.Config, "Exiting.") } @@ -582,6 +592,7 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) { st := ds.State glog.Infof("status for %s: %+v", name, st) + old := hostDriver(existing) if st.Error != nil { out.ErrLn("") @@ -594,7 +605,7 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) { out.ErrLn("") if !st.Installed && !viper.GetBool(force) { - if existing != nil && name == existing.Driver { + if existing != nil && name == old { exit.WithCodeT(exit.Unavailable, "{{.driver}} does not appear to be installed, but is specified by an existing profile. 
Please run 'minikube delete' or install {{.driver}}", out.V{"driver": name}) } exit.WithCodeT(exit.Unavailable, "{{.driver}} does not appear to be installed", out.V{"driver": name}) From a55573be125d720b1d5d08a5d812aeafb506b4b9 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Mon, 23 Mar 2020 08:33:49 -0700 Subject: [PATCH 162/668] Fix results when existing is nil --- cmd/minikube/cmd/start.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index f758cdbc74..6b39b8955f 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -522,6 +522,9 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState { // hostDriver returns the true driver used without relying on config fields func hostDriver(existing *config.ClusterConfig) string { + if existing == nil { + return "" + } api, err := machine.NewAPIClient() if err != nil { glog.Warningf("selectDriver NewAPIClient: %v", err) @@ -592,7 +595,6 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) { st := ds.State glog.Infof("status for %s: %+v", name, st) - old := hostDriver(existing) if st.Error != nil { out.ErrLn("") @@ -605,8 +607,10 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) { out.ErrLn("") if !st.Installed && !viper.GetBool(force) { - if existing != nil && name == old { - exit.WithCodeT(exit.Unavailable, "{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}", out.V{"driver": name}) + if existing != nil { + if old := hostDriver(existing); name == old { + exit.WithCodeT(exit.Unavailable, "{{.driver}} does not appear to be installed, but is specified by an existing profile. 
Please run 'minikube delete' or install {{.driver}}", out.V{"driver": name}) + } } exit.WithCodeT(exit.Unavailable, "{{.driver}} does not appear to be installed", out.V{"driver": name}) } From 14f8ee3984f292417b6a4681fef7647512c70cd0 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Mon, 23 Mar 2020 09:41:00 -0700 Subject: [PATCH 163/668] hyperv Delete: call StopHost before removing VM --- cmd/minikube/cmd/delete.go | 2 +- pkg/minikube/machine/delete.go | 18 +++++++++++++----- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/cmd/minikube/cmd/delete.go b/cmd/minikube/cmd/delete.go index 253f48128a..23c3229ac4 100644 --- a/cmd/minikube/cmd/delete.go +++ b/cmd/minikube/cmd/delete.go @@ -402,7 +402,7 @@ func deleteProfileDirectory(profile string) { out.T(out.DeletingHost, `Removing {{.directory}} ...`, out.V{"directory": machineDir}) err := os.RemoveAll(machineDir) if err != nil { - exit.WithError("Unable to remove machine directory: %v", err) + exit.WithError("Unable to remove machine directory", err) } } } diff --git a/pkg/minikube/machine/delete.go b/pkg/minikube/machine/delete.go index 888f8f158e..c51a9d0931 100644 --- a/pkg/minikube/machine/delete.go +++ b/pkg/minikube/machine/delete.go @@ -75,18 +75,26 @@ func DeleteHost(api libmachine.API, machineName string) error { return mcnerror.ErrHostDoesNotExist{Name: machineName} } - // This is slow if SSH is not responding, but HyperV hangs otherwise, See issue #2914 + // Hyper-V requires special care to avoid ACPI and file locking issues if host.Driver.DriverName() == driver.HyperV { - if err := trySSHPowerOff(host); err != nil { - glog.Infof("Unable to power off minikube because the host was not found.") + if err := StopHost(api, machineName); err != nil { + glog.Warningf("stop host: %v", err) } - out.T(out.DeletingHost, "Successfully powered off Hyper-V. 
minikube driver -- {{.driver}}", out.V{"driver": host.Driver.DriverName()}) + // Hack: give the Hyper-V VM more time to stop before deletion + time.Sleep(1 * time.Second) } out.T(out.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": host.DriverName}) if err := host.Driver.Remove(); err != nil { - return errors.Wrap(err, "host remove") + glog.Warningf("remove failed, will retry: %v", err) + time.Sleep(2 * time.Second) + + nerr := host.Driver.Remove() + if nerr != nil { + return errors.Wrap(nerr, "host remove retry") + } } + if err := api.Remove(machineName); err != nil { return errors.Wrap(err, "api remove") } From cf31a34071e534d9fdfc7d965ed8420edcbc9714 Mon Sep 17 00:00:00 2001 From: priyawadhwa Date: Mon, 23 Mar 2020 11:45:34 -0700 Subject: [PATCH 164/668] Update roadmap.en.md --- site/content/en/docs/Contributing/roadmap.en.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/site/content/en/docs/Contributing/roadmap.en.md b/site/content/en/docs/Contributing/roadmap.en.md index 37b135a78f..d169145a59 100644 --- a/site/content/en/docs/Contributing/roadmap.en.md +++ b/site/content/en/docs/Contributing/roadmap.en.md @@ -14,9 +14,9 @@ Please send a PR to suggest any improvements to it. ## (#1) Inclusive and community-driven -- [ ] Maintainers from 4 countries, 4 companies +- [x] Maintainers from 4 countries, 4 companies - [ ] Installation documentation in 5+ written languages -- [ ] Enhancements approved by a community-driven process +- [x] Enhancements approved by a community-driven process ## (#2) User-friendly @@ -33,7 +33,7 @@ Please send a PR to suggest any improvements to it. 
## (#4) Cross-platform -- [ ] VM-free deployment to containers (Docker, Podman) +- [x] VM-free deployment to containers (Docker, Podman) - [ ] Windows as a first-class citizen - [ ] WSL2 support (no additional VM required) - [ ] Firecracker VM support @@ -49,7 +49,7 @@ Please send a PR to suggest any improvements to it. - [ ] Startup latency under 30s - [ ] Kernel-assisted mounts (CIFS, NFS) by default -- [ ] Suspend and Resume +- [x] Pause support - [ ] <25% CPU overhead on a single core ## (#7) Developer Focused From bfe8aee3b57ad4c07660cfb63ce83ccf9c7e6d9b Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Mon, 23 Mar 2020 12:29:49 -0700 Subject: [PATCH 165/668] Pass in container runtime to preload tarball name This way we don't have to individually check if we support a container runtime --- hack/preload-images/preload_images.go | 2 +- pkg/drivers/kic/kic.go | 2 +- pkg/minikube/cruntime/docker.go | 3 +- pkg/minikube/download/preload.go | 47 +++++++++++++-------------- 4 files changed, 26 insertions(+), 28 deletions(-) diff --git a/hack/preload-images/preload_images.go b/hack/preload-images/preload_images.go index 60b6bc9e2a..7427d867e8 100644 --- a/hack/preload-images/preload_images.go +++ b/hack/preload-images/preload_images.go @@ -61,7 +61,7 @@ func main() { for _, kv := range k8sVersions { for _, cr := range containerRuntimes { - tf := download.TarballName(kv) + tf := download.TarballName(kv, cr) if tarballExists(tf) { fmt.Printf("A preloaded tarball for k8s version %s already exists, skipping generation.\n", kv) continue diff --git a/pkg/drivers/kic/kic.go b/pkg/drivers/kic/kic.go index 2620d981ff..d75f7ba01e 100644 --- a/pkg/drivers/kic/kic.go +++ b/pkg/drivers/kic/kic.go @@ -126,7 +126,7 @@ func (d *Driver) Create() error { t := time.Now() glog.Infof("Starting extracting preloaded images to volume") // Extract preloaded images to container - if err := oci.ExtractTarballToVolume(download.TarballPath(d.NodeConfig.KubernetesVersion), params.Name, BaseImage); 
err != nil { + if err := oci.ExtractTarballToVolume(download.TarballPath(d.NodeConfig.KubernetesVersion, d.NodeConfig.ContainerRuntime), params.Name, BaseImage); err != nil { glog.Infof("Unable to extract preloaded tarball to volume: %v", err) } else { glog.Infof("Took %f seconds to extract preloaded images to volume", time.Since(t).Seconds()) diff --git a/pkg/minikube/cruntime/docker.go b/pkg/minikube/cruntime/docker.go index 8641092573..0812eece0e 100644 --- a/pkg/minikube/cruntime/docker.go +++ b/pkg/minikube/cruntime/docker.go @@ -291,6 +291,7 @@ func (r *Docker) SystemLogCmd(len int) string { // 3. Remove the tarball within the VM func (r *Docker) Preload(cfg config.KubernetesConfig) error { k8sVersion := cfg.KubernetesVersion + cRuntime := cfg.ContainerRuntime // If images already exist, return images, err := images.Kubeadm(cfg.ImageRepository, k8sVersion) @@ -307,7 +308,7 @@ func (r *Docker) Preload(cfg config.KubernetesConfig) error { glog.Infof("error saving reference store: %v", err) } - tarballPath := download.TarballPath(k8sVersion) + tarballPath := download.TarballPath(k8sVersion, cRuntime) targetDir := "/" targetName := "preloaded.tar.lz4" dest := path.Join(targetDir, targetName) diff --git a/pkg/minikube/download/preload.go b/pkg/minikube/download/preload.go index 2008d4eddc..0fe1ea7778 100644 --- a/pkg/minikube/download/preload.go +++ b/pkg/minikube/download/preload.go @@ -46,13 +46,13 @@ const ( ) // TarballName returns name of the tarball -func TarballName(k8sVersion string) string { - return fmt.Sprintf("preloaded-images-k8s-%s-%s-docker-overlay2-%s.tar.lz4", PreloadVersion, k8sVersion, runtime.GOOS) +func TarballName(k8sVersion, containerRuntime string) string { + return fmt.Sprintf("preloaded-images-k8s-%s-%s-%s-overlay2-%s.tar.lz4", PreloadVersion, k8sVersion, containerRuntime, runtime.GOOS) } // returns the name of the checksum file -func checksumName(k8sVersion string) string { - return fmt.Sprintf("%s.checksum", TarballName(k8sVersion)) 
+func checksumName(k8sVersion, containerRuntime string) string { + return fmt.Sprintf("%s.checksum", TarballName(k8sVersion, containerRuntime)) } // returns target dir for all cached items related to preloading @@ -61,18 +61,18 @@ func targetDir() string { } // PreloadChecksumPath returns path to checksum file -func PreloadChecksumPath(k8sVersion string) string { - return path.Join(targetDir(), checksumName(k8sVersion)) +func PreloadChecksumPath(k8sVersion, containerRuntime string) string { + return path.Join(targetDir(), checksumName(k8sVersion, containerRuntime)) } // TarballPath returns the path to the preloaded tarball -func TarballPath(k8sVersion string) string { - return path.Join(targetDir(), TarballName(k8sVersion)) +func TarballPath(k8sVersion, containerRuntime string) string { + return path.Join(targetDir(), TarballName(k8sVersion, containerRuntime)) } // remoteTarballURL returns the URL for the remote tarball in GCS -func remoteTarballURL(k8sVersion string) string { - return fmt.Sprintf("https://storage.googleapis.com/%s/%s", PreloadBucket, TarballName(k8sVersion)) +func remoteTarballURL(k8sVersion, containerRuntime string) string { + return fmt.Sprintf("https://storage.googleapis.com/%s/%s", PreloadBucket, TarballName(k8sVersion, containerRuntime)) } // PreloadExists returns true if there is a preloaded tarball that can be used @@ -82,13 +82,13 @@ func PreloadExists(k8sVersion, containerRuntime string) bool { } // Omit remote check if tarball exists locally - targetPath := TarballPath(k8sVersion) + targetPath := TarballPath(k8sVersion, containerRuntime) if _, err := os.Stat(targetPath); err == nil { glog.Infof("Found local preload: %s", targetPath) return true } - url := remoteTarballURL(k8sVersion) + url := remoteTarballURL(k8sVersion, containerRuntime) resp, err := http.Head(url) if err != nil { glog.Warningf("%s fetch error: %v", url, err) @@ -107,10 +107,7 @@ func PreloadExists(k8sVersion, containerRuntime string) bool { // Preload caches the 
preloaded images tarball on the host machine func Preload(k8sVersion, containerRuntime string) error { - if containerRuntime != "docker" { - return nil - } - targetPath := TarballPath(k8sVersion) + targetPath := TarballPath(k8sVersion, containerRuntime) if _, err := os.Stat(targetPath); err == nil { glog.Infof("Found %s in cache, skipping download", targetPath) @@ -124,7 +121,7 @@ func Preload(k8sVersion, containerRuntime string) error { } out.T(out.FileDownload, "Downloading preloaded images tarball for k8s {{.version}} ...", out.V{"version": k8sVersion}) - url := remoteTarballURL(k8sVersion) + url := remoteTarballURL(k8sVersion, containerRuntime) tmpDst := targetPath + ".download" client := &getter.Client{ @@ -139,34 +136,34 @@ func Preload(k8sVersion, containerRuntime string) error { return errors.Wrapf(err, "download failed: %s", url) } - if err := saveChecksumFile(k8sVersion); err != nil { + if err := saveChecksumFile(k8sVersion, containerRuntime); err != nil { return errors.Wrap(err, "saving checksum file") } - if err := verifyChecksum(k8sVersion, tmpDst); err != nil { + if err := verifyChecksum(k8sVersion, containerRuntime, tmpDst); err != nil { return errors.Wrap(err, "verify") } return os.Rename(tmpDst, targetPath) } -func saveChecksumFile(k8sVersion string) error { - glog.Infof("saving checksum for %s ...", TarballName(k8sVersion)) +func saveChecksumFile(k8sVersion, containerRuntime string) error { + glog.Infof("saving checksum for %s ...", TarballName(k8sVersion, containerRuntime)) ctx := context.Background() client, err := storage.NewClient(ctx, option.WithoutAuthentication()) if err != nil { return errors.Wrap(err, "getting storage client") } - attrs, err := client.Bucket(PreloadBucket).Object(TarballName(k8sVersion)).Attrs(ctx) + attrs, err := client.Bucket(PreloadBucket).Object(TarballName(k8sVersion, containerRuntime)).Attrs(ctx) if err != nil { return errors.Wrap(err, "getting storage object") } checksum := attrs.MD5 - return 
ioutil.WriteFile(PreloadChecksumPath(k8sVersion), checksum, 0644) + return ioutil.WriteFile(PreloadChecksumPath(k8sVersion, containerRuntime), checksum, 0644) } // verifyChecksum returns true if the checksum of the local binary matches // the checksum of the remote binary -func verifyChecksum(k8sVersion string, path string) error { +func verifyChecksum(k8sVersion, containerRuntime, path string) error { glog.Infof("verifying checksumm of %s ...", path) // get md5 checksum of tarball path contents, err := ioutil.ReadFile(path) @@ -175,7 +172,7 @@ func verifyChecksum(k8sVersion string, path string) error { } checksum := md5.Sum(contents) - remoteChecksum, err := ioutil.ReadFile(PreloadChecksumPath(k8sVersion)) + remoteChecksum, err := ioutil.ReadFile(PreloadChecksumPath(k8sVersion, containerRuntime)) if err != nil { return errors.Wrap(err, "reading checksum file") } From 72c4bef5caea7f0c8ed616bea7a0b90b95cf43f2 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Mon, 23 Mar 2020 12:32:36 -0700 Subject: [PATCH 166/668] update integration test --- test/integration/aaa_download_only_test.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/test/integration/aaa_download_only_test.go b/test/integration/aaa_download_only_test.go index 9640212800..99f43a2f05 100644 --- a/test/integration/aaa_download_only_test.go +++ b/test/integration/aaa_download_only_test.go @@ -73,7 +73,7 @@ func TestDownloadOnly(t *testing.T) { if download.PreloadExists(v, r) { // Just make sure the tarball path exists - if _, err := os.Stat(download.TarballPath(v)); err != nil { + if _, err := os.Stat(download.TarballPath(v, r)); err != nil { t.Errorf("preloaded tarball path doesn't exist: %v", err) } return @@ -150,6 +150,8 @@ func TestDownloadOnlyDocker(t *testing.T) { t.Skip("this test only runs with the docker driver") } + cRuntime := "docker" + profile := UniqueProfileName("download-docker") ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute) defer 
Cleanup(t, profile, cancel) @@ -161,14 +163,14 @@ func TestDownloadOnlyDocker(t *testing.T) { } // Make sure the downloaded image tarball exists - tarball := download.TarballPath(constants.DefaultKubernetesVersion) + tarball := download.TarballPath(constants.DefaultKubernetesVersion, cRuntime) contents, err := ioutil.ReadFile(tarball) if err != nil { t.Errorf("reading tarball: %v", err) } // Make sure it has the correct checksum checksum := md5.Sum(contents) - remoteChecksum, err := ioutil.ReadFile(download.PreloadChecksumPath(constants.DefaultKubernetesVersion)) + remoteChecksum, err := ioutil.ReadFile(download.PreloadChecksumPath(constants.DefaultKubernetesVersion, cRuntime)) if err != nil { t.Errorf("reading checksum file: %v", err) } From 8f9a2b94136bb13749076932804bdd4768880282 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Sat, 21 Mar 2020 18:39:45 -0700 Subject: [PATCH 167/668] Consolidate cluster loading from cmd --- cmd/minikube/cmd/config/addons_list.go | 5 +- cmd/minikube/cmd/config/disable.go | 4 +- cmd/minikube/cmd/config/enable.go | 4 +- cmd/minikube/cmd/config/get.go | 4 +- cmd/minikube/cmd/config/open.go | 32 +---- cmd/minikube/cmd/config/profile.go | 16 ++- cmd/minikube/cmd/config/set.go | 8 +- cmd/minikube/cmd/config/unset.go | 6 +- cmd/minikube/cmd/config/util_test.go | 23 +--- cmd/minikube/cmd/dashboard.go | 66 ++-------- cmd/minikube/cmd/delete.go | 18 +-- cmd/minikube/cmd/docker-env.go | 151 ++++++++-------------- cmd/minikube/cmd/ip.go | 37 +----- cmd/minikube/cmd/kubectl.go | 22 +--- cmd/minikube/cmd/logs.go | 49 ++------ cmd/minikube/cmd/mount.go | 40 ++---- cmd/minikube/cmd/node_add.go | 16 +-- cmd/minikube/cmd/node_delete.go | 15 +-- cmd/minikube/cmd/node_start.go | 15 +-- cmd/minikube/cmd/node_stop.go | 14 +-- cmd/minikube/cmd/pause.go | 28 +---- cmd/minikube/cmd/podman-env.go | 118 +++++++----------- cmd/minikube/cmd/service.go | 32 +---- cmd/minikube/cmd/service_list.go | 29 +---- cmd/minikube/cmd/ssh-key.go | 9 +- 
cmd/minikube/cmd/ssh.go | 34 ++--- cmd/minikube/cmd/start.go | 22 ++-- cmd/minikube/cmd/status.go | 21 +--- cmd/minikube/cmd/stop.go | 19 +-- cmd/minikube/cmd/tunnel.go | 25 ++-- cmd/minikube/cmd/unpause.go | 30 ++--- cmd/minikube/cmd/update-context.go | 23 +--- pkg/minikube/mustload/mustload.go | 166 +++++++++++++++++++++++++ pkg/minikube/out/style.go | 2 + pkg/minikube/out/style_enum.go | 2 + 35 files changed, 421 insertions(+), 684 deletions(-) create mode 100644 pkg/minikube/mustload/mustload.go diff --git a/cmd/minikube/cmd/config/addons_list.go b/cmd/minikube/cmd/config/addons_list.go index 73c72ec37e..5ed95a850b 100644 --- a/cmd/minikube/cmd/config/addons_list.go +++ b/cmd/minikube/cmd/config/addons_list.go @@ -26,7 +26,6 @@ import ( "github.com/golang/glog" "github.com/olekukonko/tablewriter" "github.com/spf13/cobra" - "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" @@ -98,7 +97,7 @@ var printAddonsList = func() { table.SetAutoFormatHeaders(true) table.SetBorders(tablewriter.Border{Left: true, Top: true, Right: true, Bottom: true}) table.SetCenterSeparator("|") - pName := viper.GetString(config.ProfileName) + pName := ClusterFlagValue() for _, addonName := range addonNames { addonBundle := assets.Addons[addonName] @@ -123,7 +122,7 @@ var printAddonsList = func() { var printAddonsJSON = func() { addonNames := make([]string, 0, len(assets.Addons)) - pName := viper.GetString(config.ProfileName) + pName := ClusterFlagValue() for addonName := range assets.Addons { addonNames = append(addonNames, addonName) } diff --git a/cmd/minikube/cmd/config/disable.go b/cmd/minikube/cmd/config/disable.go index af050c105e..552eaf7108 100644 --- a/cmd/minikube/cmd/config/disable.go +++ b/cmd/minikube/cmd/config/disable.go @@ -18,9 +18,7 @@ package config import ( "github.com/spf13/cobra" - "github.com/spf13/viper" "k8s.io/minikube/pkg/addons" - "k8s.io/minikube/pkg/minikube/config" 
"k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/out" ) @@ -35,7 +33,7 @@ var addonsDisableCmd = &cobra.Command{ } addon := args[0] - err := addons.Set(addon, "false", viper.GetString(config.ProfileName)) + err := addons.Set(addon, "false", ClusterFlagValue()) if err != nil { exit.WithError("disable failed", err) } diff --git a/cmd/minikube/cmd/config/enable.go b/cmd/minikube/cmd/config/enable.go index 5f325a6eed..99cbb3bb88 100644 --- a/cmd/minikube/cmd/config/enable.go +++ b/cmd/minikube/cmd/config/enable.go @@ -18,9 +18,7 @@ package config import ( "github.com/spf13/cobra" - "github.com/spf13/viper" "k8s.io/minikube/pkg/addons" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/out" ) @@ -34,7 +32,7 @@ var addonsEnableCmd = &cobra.Command{ exit.UsageT("usage: minikube addons enable ADDON_NAME") } addon := args[0] - err := addons.Set(addon, "true", viper.GetString(config.ProfileName)) + err := addons.Set(addon, "true", ClusterFlagValue()) if err != nil { exit.WithError("enable failed", err) } diff --git a/cmd/minikube/cmd/config/get.go b/cmd/minikube/cmd/config/get.go index 5c3f034ab7..f5a7899aa2 100644 --- a/cmd/minikube/cmd/config/get.go +++ b/cmd/minikube/cmd/config/get.go @@ -21,7 +21,7 @@ import ( "fmt" "github.com/spf13/cobra" - pkgConfig "k8s.io/minikube/pkg/minikube/config" + config "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/out" ) @@ -59,5 +59,5 @@ func init() { // Get gets a property func Get(name string) (string, error) { - return pkgConfig.Get(name) + return config.Get(name) } diff --git a/cmd/minikube/cmd/config/open.go b/cmd/minikube/cmd/config/open.go index c4c8b5416d..babbc7c00b 100644 --- a/cmd/minikube/cmd/config/open.go +++ b/cmd/minikube/cmd/config/open.go @@ -18,19 +18,14 @@ package config import ( "fmt" - "os" "text/template" "github.com/pkg/browser" "github.com/spf13/cobra" - "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/assets" - 
"k8s.io/minikube/pkg/minikube/config" - pkg_config "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/service" ) @@ -62,32 +57,17 @@ var addonsOpenCmd = &cobra.Command{ exit.UsageT("usage: minikube addons open ADDON_NAME") } addonName := args[0] - // TODO(r2d4): config should not reference API, pull this out - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() - profileName := viper.GetString(pkg_config.ProfileName) - cc, err := config.Load(profileName) - if err != nil { - exit.WithError("Error getting cluster", err) - } - cp, err := config.PrimaryControlPlane(cc) - if err != nil { - exit.WithError("Error getting control plane", err) - } - if !machine.IsRunning(api, driver.MachineName(*cc, cp)) { - os.Exit(1) - } + cname := ClusterFlagValue() + co := mustload.Healthy(cname) + addon, ok := assets.Addons[addonName] // validate addon input if !ok { exit.WithCodeT(exit.Data, `addon '{{.name}}' is not a valid addon packaged with minikube. 
To see the list of available addons run: minikube addons list`, out.V{"name": addonName}) } - ok, err = addon.IsEnabled(profileName) + ok, err := addon.IsEnabled(cname) if err != nil { exit.WithError("IsEnabled failed", err) } @@ -112,7 +92,7 @@ You can add one by annotating a service with the label {{.labelName}}:{{.addonNa svc := serviceList.Items[i].ObjectMeta.Name var urlString []string - if urlString, err = service.WaitForService(api, namespace, svc, addonsURLTemplate, addonsURLMode, https, wait, interval); err != nil { + if urlString, err = service.WaitForService(co.API, namespace, svc, addonsURLTemplate, addonsURLMode, https, wait, interval); err != nil { exit.WithCodeT(exit.Unavailable, "Wait failed: {{.error}}", out.V{"error": err}) } diff --git a/cmd/minikube/cmd/config/profile.go b/cmd/minikube/cmd/config/profile.go index 31bd0bb0af..4161ee3aac 100644 --- a/cmd/minikube/cmd/config/profile.go +++ b/cmd/minikube/cmd/config/profile.go @@ -20,9 +20,7 @@ import ( "os" "github.com/spf13/cobra" - "github.com/spf13/viper" - pkgConfig "k8s.io/minikube/pkg/minikube/config" - pkg_config "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/out" @@ -35,7 +33,7 @@ var ProfileCmd = &cobra.Command{ Long: "profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. 
You can return to the default minikube profile by running `minikube profile default`", Run: func(cmd *cobra.Command, args []string) { if len(args) == 0 { - profile := viper.GetString(pkgConfig.ProfileName) + profile := ClusterFlagValue() out.T(out.Empty, profile) os.Exit(0) } @@ -49,7 +47,7 @@ var ProfileCmd = &cobra.Command{ we need to add code over here to check whether the profile name is in the list of reserved keywords */ - if pkgConfig.ProfileNameInReservedKeywords(profile) { + if config.ProfileNameInReservedKeywords(profile) { out.ErrT(out.FailureType, `Profile name "{{.profilename}}" is minikube keyword. To delete profile use command minikube delete -p `, out.V{"profilename": profile}) os.Exit(0) } @@ -64,18 +62,18 @@ var ProfileCmd = &cobra.Command{ } } - if !pkgConfig.ProfileExists(profile) { + if !config.ProfileExists(profile) { out.ErrT(out.Tip, `if you want to create a profile you can by this command: minikube start -p {{.profile_name}}`, out.V{"profile_name": profile}) os.Exit(0) } - err := Set(pkgConfig.ProfileName, profile) + err := Set(config.ProfileName, profile) if err != nil { exit.WithError("Setting profile failed", err) } - cc, err := pkgConfig.Load(profile) + cc, err := config.Load(profile) // might err when loading older version of cfg file that doesn't have KeepContext field - if err != nil && !pkg_config.IsNotExist(err) { + if err != nil && !config.IsNotExist(err) { out.ErrT(out.Sad, `Error loading profile config: {{.error}}`, out.V{"error": err}) } if err == nil { diff --git a/cmd/minikube/cmd/config/set.go b/cmd/minikube/cmd/config/set.go index 21f99863bf..b074af6aef 100644 --- a/cmd/minikube/cmd/config/set.go +++ b/cmd/minikube/cmd/config/set.go @@ -19,7 +19,7 @@ package config import ( "github.com/pkg/errors" "github.com/spf13/cobra" - pkgConfig "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/out" @@ 
-61,11 +61,11 @@ func Set(name string, value string) error { } // Set the value - config, err := pkgConfig.ReadConfig(localpath.ConfigFile()) + cc, err := config.ReadConfig(localpath.ConfigFile()) if err != nil { return errors.Wrapf(err, "read config file %q", localpath.ConfigFile()) } - err = s.set(config, name, value) + err = s.set(cc, name, value) if err != nil { return errors.Wrapf(err, "set") } @@ -77,5 +77,5 @@ func Set(name string, value string) error { } // Write the value - return pkgConfig.WriteConfig(localpath.ConfigFile(), config) + return config.WriteConfig(localpath.ConfigFile(), cc) } diff --git a/cmd/minikube/cmd/config/unset.go b/cmd/minikube/cmd/config/unset.go index 1c68b53d9f..122d8ca828 100644 --- a/cmd/minikube/cmd/config/unset.go +++ b/cmd/minikube/cmd/config/unset.go @@ -18,7 +18,7 @@ package config import ( "github.com/spf13/cobra" - pkgConfig "k8s.io/minikube/pkg/minikube/config" + config "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" ) @@ -44,10 +44,10 @@ func init() { // Unset unsets a property func Unset(name string) error { - m, err := pkgConfig.ReadConfig(localpath.ConfigFile()) + m, err := config.ReadConfig(localpath.ConfigFile()) if err != nil { return err } delete(m, name) - return pkgConfig.WriteConfig(localpath.ConfigFile(), m) + return config.WriteConfig(localpath.ConfigFile(), m) } diff --git a/cmd/minikube/cmd/config/util_test.go b/cmd/minikube/cmd/config/util_test.go index b085ea965f..4347cb4073 100644 --- a/cmd/minikube/cmd/config/util_test.go +++ b/cmd/minikube/cmd/config/util_test.go @@ -20,11 +20,11 @@ import ( "fmt" "testing" - pkgConfig "k8s.io/minikube/pkg/minikube/config" + config "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" ) -var minikubeConfig = pkgConfig.MinikubeConfig{ +var minikubeConfig = config.MinikubeConfig{ "driver": driver.KVM2, "cpus": 12, "show-libmachine-logs": true, @@ -83,21 +83,10 @@ func TestSetBool(t 
*testing.T) { } func TestValidateProfile(t *testing.T) { - testCases := []struct { - profileName string - }{ - { - profileName: "82374328742_2974224498", - }, - { - profileName: "validate_test", - }, - } - - for _, test := range testCases { - profileNam := test.profileName - expected := fmt.Sprintf("profile %q not found", test.profileName) - err, ok := ValidateProfile(profileNam) + testCases := []string{"82374328742_2974224498", "validate_test"} + for _, name := range testCases { + expected := fmt.Sprintf("profile %q not found", name) + err, ok := ValidateProfile(name) if !ok && err.Error() != expected { t.Errorf("got error %q, expected %q", err, expected) } diff --git a/cmd/minikube/cmd/dashboard.go b/cmd/minikube/cmd/dashboard.go index b5fd848eb3..5a77d95650 100644 --- a/cmd/minikube/cmd/dashboard.go +++ b/cmd/minikube/cmd/dashboard.go @@ -21,25 +21,19 @@ import ( "fmt" "io" "net/http" - "os" "os/exec" "os/user" "regexp" "time" - "github.com/docker/machine/libmachine/mcnerror" "github.com/golang/glog" "github.com/pkg/browser" "github.com/pkg/errors" "github.com/spf13/cobra" - "github.com/spf13/viper" pkgaddons "k8s.io/minikube/pkg/addons" "k8s.io/minikube/pkg/minikube/assets" - "k8s.io/minikube/pkg/minikube/config" - pkg_config "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/proxy" "k8s.io/minikube/pkg/minikube/service" @@ -59,47 +53,11 @@ var dashboardCmd = &cobra.Command{ Short: "Access the kubernetes dashboard running within the minikube cluster", Long: `Access the kubernetes dashboard running within the minikube cluster`, Run: func(cmd *cobra.Command, args []string) { - profileName := viper.GetString(pkg_config.ProfileName) - cc, err := pkg_config.Load(profileName) - if err != nil && !pkg_config.IsNotExist(err) { - exit.WithError("Error loading 
profile config", err) - } + cname := ClusterFlagValue() + co := mustload.Healthy(cname) - if err != nil { - out.ErrT(out.Meh, `"{{.name}}" profile does not exist`, out.V{"name": profileName}) - os.Exit(1) - } - - api, err := machine.NewAPIClient() - defer func() { - err := api.Close() - if err != nil { - glog.Warningf("Failed to close API: %v", err) - } - }() - - if err != nil { - exit.WithError("Error getting client", err) - } - - cp, err := config.PrimaryControlPlane(cc) - if err != nil { - exit.WithError("Error getting primary control plane", err) - } - - machineName := driver.MachineName(*cc, cp) - if _, err = api.Load(machineName); err != nil { - switch err := errors.Cause(err).(type) { - case mcnerror.ErrHostDoesNotExist: - exit.WithCodeT(exit.Unavailable, "{{.name}} cluster does not exist", out.V{"name": cc.Name}) - default: - exit.WithError("Error getting cluster", err) - } - } - - for _, n := range cc.Nodes { - err = proxy.ExcludeIP(n.IP) // to be used for http get calls - if err != nil { + for _, n := range co.Config.Nodes { + if err := proxy.ExcludeIP(n.IP); err != nil { glog.Errorf("Error excluding IP from proxy: %s", err) } } @@ -109,18 +67,14 @@ var dashboardCmd = &cobra.Command{ exit.WithCodeT(exit.NoInput, "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/") } - if !machine.IsRunning(api, machineName) { - os.Exit(1) - } - // Check dashboard status before enabling it dashboardAddon := assets.Addons["dashboard"] - dashboardStatus, _ := dashboardAddon.IsEnabled(profileName) + dashboardStatus, _ := dashboardAddon.IsEnabled(cname) if !dashboardStatus { // Send status messages to stderr for folks re-using this output. 
out.ErrT(out.Enabling, "Enabling dashboard ...") // Enable the dashboard add-on - err = pkgaddons.Set("dashboard", "true", profileName) + err = pkgaddons.Set("dashboard", "true", cname) if err != nil { exit.WithError("Unable to enable dashboard", err) } @@ -135,7 +89,7 @@ var dashboardCmd = &cobra.Command{ } out.ErrT(out.Launch, "Launching proxy ...") - p, hostPort, err := kubectlProxy(kubectl, machineName) + p, hostPort, err := kubectlProxy(kubectl, cname) if err != nil { exit.WithError("kubectl proxy", err) } @@ -169,10 +123,10 @@ var dashboardCmd = &cobra.Command{ } // kubectlProxy runs "kubectl proxy", returning host:port -func kubectlProxy(path string, machineName string) (*exec.Cmd, string, error) { +func kubectlProxy(path string, contextName string) (*exec.Cmd, string, error) { // port=0 picks a random system port - cmd := exec.Command(path, "--context", machineName, "proxy", "--port=0") + cmd := exec.Command(path, "--context", contextName, "proxy", "--port=0") stdoutPipe, err := cmd.StdoutPipe() if err != nil { diff --git a/cmd/minikube/cmd/delete.go b/cmd/minikube/cmd/delete.go index 253f48128a..1676861d8b 100644 --- a/cmd/minikube/cmd/delete.go +++ b/cmd/minikube/cmd/delete.go @@ -141,10 +141,10 @@ func runDelete(cmd *cobra.Command, args []string) { exit.UsageT("usage: minikube delete") } - profileName := viper.GetString(config.ProfileName) - profile, err := config.LoadProfile(profileName) + cname := ClusterFlagValue() + profile, err := config.LoadProfile(cname) if err != nil { - out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": profileName}) + out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": cname}) } errs := DeleteProfiles([]*config.Profile{profile}) @@ -272,13 +272,13 @@ func deleteHosts(api libmachine.API, cc *config.ClusterConfig) { } } -func deleteConfig(profileName string) error { - if err := config.DeleteProfile(profileName); err != nil { +func deleteConfig(cname string) 
error { + if err := config.DeleteProfile(cname); err != nil { if config.IsNotExist(err) { - delErr := profileDeletionErr(profileName, fmt.Sprintf("\"%s\" profile does not exist", profileName)) + delErr := profileDeletionErr(cname, fmt.Sprintf("\"%s\" profile does not exist", cname)) return DeletionError{Err: delErr, Errtype: MissingProfile} } - delErr := profileDeletionErr(profileName, fmt.Sprintf("failed to remove profile %v", err)) + delErr := profileDeletionErr(cname, fmt.Sprintf("failed to remove profile %v", err)) return DeletionError{Err: delErr, Errtype: Fatal} } return nil @@ -317,8 +317,8 @@ func deleteInvalidProfile(profile *config.Profile) []error { return errs } -func profileDeletionErr(profileName string, additionalInfo string) error { - return fmt.Errorf("error deleting profile \"%s\": %s", profileName, additionalInfo) +func profileDeletionErr(cname string, additionalInfo string) error { + return fmt.Errorf("error deleting profile \"%s\": %s", cname, additionalInfo) } func uninstallKubernetes(api libmachine.API, cc config.ClusterConfig, n config.Node, bsName string) error { diff --git a/cmd/minikube/cmd/docker-env.go b/cmd/minikube/cmd/docker-env.go index 138c2f0282..4b61a435d2 100644 --- a/cmd/minikube/cmd/docker-env.go +++ b/cmd/minikube/cmd/docker-env.go @@ -24,20 +24,18 @@ import ( "io" "net" "os" + "os/exec" "strconv" "strings" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/state" "github.com/spf13/cobra" - "github.com/spf13/viper" "k8s.io/minikube/pkg/drivers/kic/oci" - "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/shell" ) @@ -117,23 +115,10 @@ func (EnvNoProxyGetter) 
GetNoProxyVar() (string, string) { } // isDockerActive checks if Docker is active -func isDockerActive(d drivers.Driver) (bool, error) { - client, err := drivers.GetSSHClientFromDriver(d) - if err != nil { - return false, err - } - cmd := "sudo systemctl is-active docker" - - output, err := client.Output(cmd) - s := strings.TrimSpace(output) - - if err != nil { - return false, fmt.Errorf("%s failed: %v\noutput: %q", cmd, err, s) - } - if s != "active" { - return false, fmt.Errorf("%s returned %q", cmd, s) - } - return true, nil +func isDockerActive(r command.Runner) bool { + c := exec.Command("sudo", "systemctl", "is-active", "--quiet", "service", "docker") + _, err := r.RunCmd(c) + return err == nil } // dockerEnvCmd represents the docker-env command @@ -142,94 +127,62 @@ var dockerEnvCmd = &cobra.Command{ Short: "Sets up docker env variables; similar to '$(docker-machine env)'", Long: `Sets up docker env variables; similar to '$(docker-machine env)'.`, Run: func(cmd *cobra.Command, args []string) { - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) + cname := ClusterFlagValue() + co := mustload.Running(cname) + driverName := co.CPHost.DriverName + + if driverName == driver.None { + exit.UsageT(`'none' driver does not support 'minikube docker-env' command`) } - defer api.Close() - profile := viper.GetString(config.ProfileName) - cc, err := config.Load(profile) - if err != nil { - exit.WithError("Error getting config", err) + if co.Config.KubernetesConfig.ContainerRuntime != "docker" { + exit.WithCodeT(exit.BadUsage, `The docker-env command is only compatible with the "docker" runtime, but this cluster was configured to use the "{{.runtime}}" runtime.`, + out.V{"runtime": co.Config.KubernetesConfig.ContainerRuntime}) } - for _, n := range cc.Nodes { - machineName := driver.MachineName(*cc, n) - host, err := machine.LoadHost(api, machineName) + + if ok := isDockerActive(co.CPRunner); !ok { + 
exit.WithCodeT(exit.Unavailable, `The docker service within '{{.name}}' is not active`, out.V{"name": cname}) + } + + sh := shell.EnvConfig{ + Shell: shell.ForceShell, + } + + var err error + port := constants.DockerDaemonPort + if driver.IsKIC(driverName) { + port, err = oci.ForwardedPort(driverName, cname, port) if err != nil { - exit.WithError("Error getting host", err) - } - if host.Driver.DriverName() == driver.None { - exit.UsageT(`'none' driver does not support 'minikube docker-env' command`) + exit.WithCodeT(exit.Failure, "Error getting port binding for '{{.driver_name}} driver: {{.error}}", out.V{"driver_name": driverName, "error": err}) } + } - hostSt, err := machine.Status(api, machineName) + ec := DockerEnvConfig{ + EnvConfig: sh, + profile: cname, + driver: driverName, + hostIP: co.DriverIP.String(), + port: port, + certsDir: localpath.MakeMiniPath("certs"), + noProxy: noProxy, + } + + if ec.Shell == "" { + ec.Shell, err = shell.Detect() if err != nil { - exit.WithError("Error getting host status", err) - } - if hostSt != state.Running.String() { - exit.WithCodeT(exit.Unavailable, `'{{.profile}}' is not running`, out.V{"profile": profile}) + exit.WithError("Error detecting shell", err) } + } - if cc.KubernetesConfig.ContainerRuntime != "docker" { - exit.WithCodeT(exit.BadUsage, `The docker-env command is only compatible with the "docker" runtime, but this cluster was configured to use the "{{.runtime}}" runtime.`, - out.V{"runtime": cc.KubernetesConfig.ContainerRuntime}) + if dockerUnset { + if err := dockerUnsetScript(ec, os.Stdout); err != nil { + exit.WithError("Error generating unset output", err) } + return + } - ok, err := isDockerActive(host.Driver) - if err != nil { - exit.WithError("Docker runtime check failed", err) - } - - if !ok { - exit.WithCodeT(exit.Unavailable, `The docker service within '{{.profile}}' is not active`, out.V{"profile": profile}) - } - - hostIP, err := host.Driver.GetIP() - if err != nil { - exit.WithError("Error getting 
host IP", err) - } - - sh := shell.EnvConfig{ - Shell: shell.ForceShell, - } - - port := constants.DockerDaemonPort - if driver.IsKIC(host.DriverName) { // for kic we need to find what port docker/podman chose for us - hostIP = oci.DefaultBindIPV4 - port, err = oci.ForwardedPort(host.DriverName, profile, port) - if err != nil { - exit.WithCodeT(exit.Failure, "Error getting port binding for '{{.driver_name}} driver: {{.error}}", out.V{"driver_name": host.DriverName, "error": err}) - } - } - - ec := DockerEnvConfig{ - EnvConfig: sh, - profile: profile, - driver: host.DriverName, - hostIP: hostIP, - port: port, - certsDir: localpath.MakeMiniPath("certs"), - noProxy: noProxy, - } - - if ec.Shell == "" { - ec.Shell, err = shell.Detect() - if err != nil { - exit.WithError("Error detecting shell", err) - } - } - - if dockerUnset { - if err := dockerUnsetScript(ec, os.Stdout); err != nil { - exit.WithError("Error generating unset output", err) - } - return - } - - if err := dockerSetScript(ec, os.Stdout); err != nil { - exit.WithError("Error generating set output", err) - } + if err := dockerSetScript(ec, os.Stdout); err != nil { + exit.WithError("Error generating set output", err) } }, } diff --git a/cmd/minikube/cmd/ip.go b/cmd/minikube/cmd/ip.go index 466323b4a4..04dd705fb5 100644 --- a/cmd/minikube/cmd/ip.go +++ b/cmd/minikube/cmd/ip.go @@ -17,14 +17,8 @@ limitations under the License. 
package cmd import ( - "github.com/docker/machine/libmachine/mcnerror" - "github.com/pkg/errors" "github.com/spf13/cobra" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" - "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" ) @@ -34,32 +28,7 @@ var ipCmd = &cobra.Command{ Short: "Retrieves the IP address of the running cluster", Long: `Retrieves the IP address of the running cluster, and writes it to STDOUT.`, Run: func(cmd *cobra.Command, args []string) { - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() - - cc, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil { - exit.WithError("Error getting config", err) - } - for _, n := range cc.Nodes { - machineName := driver.MachineName(*cc, n) - host, err := api.Load(machineName) - if err != nil { - switch err := errors.Cause(err).(type) { - case mcnerror.ErrHostDoesNotExist: - exit.WithCodeT(exit.NoInput, `"{{.profile_name}}" host does not exist, unable to show an IP`, out.V{"profile_name": cc.Name}) - default: - exit.WithError("Error getting host", err) - } - } - ip, err := host.Driver.GetIP() - if err != nil { - exit.WithError("Error getting IP", err) - } - out.Ln(ip) - } + co := mustload.Running(ClusterFlagValue()) + out.Ln(co.DriverIP.String()) }, } diff --git a/cmd/minikube/cmd/kubectl.go b/cmd/minikube/cmd/kubectl.go index e24943a7d4..f4867b45e0 100644 --- a/cmd/minikube/cmd/kubectl.go +++ b/cmd/minikube/cmd/kubectl.go @@ -24,10 +24,8 @@ import ( "github.com/golang/glog" "github.com/spf13/cobra" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" ) @@ 
-42,21 +40,11 @@ Examples: minikube kubectl -- --help minikube kubectl -- get pods --namespace kube-system`, Run: func(cmd *cobra.Command, args []string) { - api, err := machine.NewAPIClient() - if err != nil { - fmt.Fprintf(os.Stderr, "Error getting client: %v\n", err) - os.Exit(1) - } - defer api.Close() + co := mustload.Healthy(ClusterFlagValue()) - cc, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil && !config.IsNotExist(err) { - out.ErrLn("Error loading profile config: %v", err) - } - - version := constants.DefaultKubernetesVersion - if cc != nil { - version = cc.KubernetesConfig.KubernetesVersion + version := co.Config.KubernetesConfig.KubernetesVersion + if version == "" { + version = constants.DefaultKubernetesVersion } path, err := node.CacheKubectlBinary(version) diff --git a/cmd/minikube/cmd/logs.go b/cmd/minikube/cmd/logs.go index e6ca33f9fb..098ab0d99f 100644 --- a/cmd/minikube/cmd/logs.go +++ b/cmd/minikube/cmd/logs.go @@ -21,13 +21,10 @@ import ( "github.com/spf13/viper" cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/minikube/cluster" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/cruntime" - "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/logs" - "k8s.io/minikube/pkg/minikube/machine" - "k8s.io/minikube/pkg/minikube/node" + "k8s.io/minikube/pkg/minikube/mustload" ) const ( @@ -51,62 +48,30 @@ var logsCmd = &cobra.Command{ Short: "Gets the logs of the running instance, used for debugging minikube, not user code.", Long: `Gets the logs of the running instance, used for debugging minikube, not user code.`, Run: func(cmd *cobra.Command, args []string) { - cfg, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil { - exit.WithError("Error getting config", err) - } + co := mustload.Running(ClusterFlagValue()) - if nodeName == "" { - cp, err := config.PrimaryControlPlane(cfg) - if err != nil { - 
exit.WithError("Error getting primary control plane", err) - } - nodeName = cp.Name - } - - n, _, err := node.Retrieve(cfg, nodeName) - if err != nil { - exit.WithError("Error retrieving node", err) - } - - machineName := driver.MachineName(*cfg, *n) - - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() - - h, err := api.Load(machineName) - if err != nil { - exit.WithError("api load", err) - } - runner, err := machine.CommandRunner(h) - if err != nil { - exit.WithError("command runner", err) - } - bs, err := cluster.Bootstrapper(api, viper.GetString(cmdcfg.Bootstrapper), *cfg, *n) + bs, err := cluster.Bootstrapper(co.API, viper.GetString(cmdcfg.Bootstrapper), *co.Config, *co.CPNode) if err != nil { exit.WithError("Error getting cluster bootstrapper", err) } - cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: runner}) + cr, err := cruntime.New(cruntime.Config{Type: co.Config.KubernetesConfig.ContainerRuntime, Runner: co.CPRunner}) if err != nil { exit.WithError("Unable to get runtime", err) } if followLogs { - err := logs.Follow(cr, bs, *cfg, runner) + err := logs.Follow(cr, bs, *co.Config, co.CPRunner) if err != nil { exit.WithError("Follow", err) } return } if showProblems { - problems := logs.FindProblems(cr, bs, *cfg, runner) + problems := logs.FindProblems(cr, bs, *co.Config, co.CPRunner) logs.OutputProblems(problems, numberOfProblems) return } - err = logs.Output(cr, bs, *cfg, runner, numberOfLines) + err = logs.Output(cr, bs, *co.Config, co.CPRunner, numberOfLines) if err != nil { exit.WithError("Error getting machine logs", err) } diff --git a/cmd/minikube/cmd/mount.go b/cmd/minikube/cmd/mount.go index 46ea74b524..570a76447e 100644 --- a/cmd/minikube/cmd/mount.go +++ b/cmd/minikube/cmd/mount.go @@ -30,12 +30,10 @@ import ( "github.com/golang/glog" "github.com/pkg/errors" "github.com/spf13/cobra" - "github.com/spf13/viper" 
"k8s.io/minikube/pkg/minikube/cluster" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/third_party/go9p/ufs" ) @@ -99,30 +97,16 @@ var mountCmd = &cobra.Command{ if glog.V(1) { debugVal = 1 // ufs.StartServer takes int debug param } - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() - cc, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil { - exit.WithError("Error getting config", err) - } - cp, err := config.PrimaryControlPlane(cc) - if err != nil { - exit.WithError("Error getting primary cp", err) - } - host, err := api.Load(driver.MachineName(*cc, cp)) - if err != nil { - exit.WithError("Error loading api", err) - } - if host.Driver.DriverName() == driver.None { + co := mustload.Running(ClusterFlagValue()) + if co.CPHost.Driver.DriverName() == driver.None { exit.UsageT(`'none' driver does not support 'minikube mount' command`) } + var ip net.IP + var err error if mountIP == "" { - ip, err = cluster.GetVMHostIP(host) + ip, err = cluster.GetVMHostIP(co.CPHost) if err != nil { exit.WithError("Error getting the host IP address to use from within the VM", err) } @@ -163,7 +147,7 @@ var mountCmd = &cobra.Command{ } bindIP := ip.String() // the ip to listen on the user's host machine - if driver.IsKIC(host.Driver.DriverName()) && runtime.GOOS != "linux" { + if driver.IsKIC(co.CPHost.Driver.DriverName()) && runtime.GOOS != "linux" { bindIP = "127.0.0.1" } out.T(out.Mounting, "Mounting host path {{.sourcePath}} into VM as {{.destinationPath}} ...", out.V{"sourcePath": hostPath, "destinationPath": vmPath}) @@ -187,19 +171,13 @@ var mountCmd = &cobra.Command{ }() } - // Use CommandRunner, as the native docker ssh service dies when Ctrl-C is received. 
- runner, err := machine.CommandRunner(host) - if err != nil { - exit.WithError("Failed to get command runner", err) - } - // Unmount if Ctrl-C or kill request is received. c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, syscall.SIGTERM) go func() { for sig := range c { out.T(out.Unmount, "Unmounting {{.path}} ...", out.V{"path": vmPath}) - err := cluster.Unmount(runner, vmPath) + err := cluster.Unmount(co.CPRunner, vmPath) if err != nil { out.ErrT(out.FailureType, "Failed unmount: {{.error}}", out.V{"error": err}) } @@ -207,7 +185,7 @@ var mountCmd = &cobra.Command{ } }() - err = cluster.Mount(runner, ip.String(), vmPath, cfg) + err = cluster.Mount(co.CPRunner, ip.String(), vmPath, cfg) if err != nil { exit.WithError("mount failed", err) } diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index d593639b4d..b923b9e935 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -19,10 +19,10 @@ package cmd import ( "github.com/spf13/cobra" "github.com/spf13/pflag" - "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" ) @@ -36,11 +36,8 @@ var nodeAddCmd = &cobra.Command{ Short: "Adds a node to the given cluster.", Long: "Adds a node to the given cluster config, and starts it.", Run: func(cmd *cobra.Command, args []string) { - profile := viper.GetString(config.ProfileName) - cc, err := config.Load(profile) - if err != nil { - exit.WithError("Error getting config", err) - } + co := mustload.Healthy(ClusterFlagValue()) + cc := co.Config if driver.BareMetal(cc.Driver) { out.ErrT(out.FailureType, "none driver does not support multi-node clusters") @@ -48,7 +45,7 @@ var nodeAddCmd = &cobra.Command{ name := node.Name(len(cc.Nodes) + 1) - out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, 
"cluster": profile}) + out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) // TODO: Deal with parameters better. Ideally we should be able to acceot any node-specific minikube start params here. n := config.Node{ @@ -58,12 +55,11 @@ var nodeAddCmd = &cobra.Command{ KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, } - err = node.Add(cc, n) - if err != nil { + if err := node.Add(cc, n); err != nil { exit.WithError("Error adding node to cluster", err) } - out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": profile}) + out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": cc.Name}) }, } diff --git a/cmd/minikube/cmd/node_delete.go b/cmd/minikube/cmd/node_delete.go index 8f43749a27..bca5c497c4 100644 --- a/cmd/minikube/cmd/node_delete.go +++ b/cmd/minikube/cmd/node_delete.go @@ -18,9 +18,8 @@ package cmd import ( "github.com/spf13/cobra" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" ) @@ -36,16 +35,10 @@ var nodeDeleteCmd = &cobra.Command{ } name := args[0] - profile := viper.GetString(config.ProfileName) - out.T(out.DeletingHost, "Deleting node {{.name}} from cluster {{.cluster}}", out.V{"name": name, "cluster": profile}) + co := mustload.Healthy(ClusterFlagValue()) + out.T(out.DeletingHost, "Deleting node {{.name}} from cluster {{.cluster}}", out.V{"name": name, "cluster": co.Config.Name}) - cc, err := config.Load(profile) - if err != nil { - exit.WithError("loading config", err) - } - - err = node.Delete(*cc, name) - if err != nil { + if err := node.Delete(*co.Config, name); err != nil { exit.WithError("deleting node", err) } diff --git a/cmd/minikube/cmd/node_start.go b/cmd/minikube/cmd/node_start.go index 17e3da8694..285de93d7b 100644 --- 
a/cmd/minikube/cmd/node_start.go +++ b/cmd/minikube/cmd/node_start.go @@ -20,10 +20,9 @@ import ( "os" "github.com/spf13/cobra" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" ) @@ -37,24 +36,14 @@ var nodeStartCmd = &cobra.Command{ exit.UsageT("Usage: minikube node start [name]") } + api, cc := mustload.Partial(ClusterFlagValue()) name := args[0] - // Make sure it's not running - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("creating api client", err) - } - if machine.IsRunning(api, name) { out.T(out.Check, "{{.name}} is already running", out.V{"name": name}) os.Exit(0) } - cc, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil { - exit.WithError("loading config", err) - } - n, _, err := node.Retrieve(cc, name) if err != nil { exit.WithError("retrieving node", err) diff --git a/cmd/minikube/cmd/node_stop.go b/cmd/minikube/cmd/node_stop.go index e2a37573b8..5dbceba1bc 100644 --- a/cmd/minikube/cmd/node_stop.go +++ b/cmd/minikube/cmd/node_stop.go @@ -18,11 +18,10 @@ package cmd import ( "github.com/spf13/cobra" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" ) @@ -37,16 +36,7 @@ var nodeStopCmd = &cobra.Command{ } name := args[0] - - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("creating api client", err) - } - - cc, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil { - exit.WithError("getting config", err) - } + api, cc := mustload.Partial(ClusterFlagValue()) n, _, err := node.Retrieve(cc, name) if err != nil { diff --git 
a/cmd/minikube/cmd/pause.go b/cmd/minikube/cmd/pause.go index 4b63ed963c..33ef1f9f04 100644 --- a/cmd/minikube/cmd/pause.go +++ b/cmd/minikube/cmd/pause.go @@ -17,7 +17,6 @@ limitations under the License. package cmd import ( - "os" "strings" "github.com/golang/glog" @@ -25,11 +24,11 @@ import ( "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/cluster" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" ) @@ -46,27 +45,10 @@ var pauseCmd = &cobra.Command{ } func runPause(cmd *cobra.Command, args []string) { - cname := viper.GetString(config.ProfileName) - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() - cc, err := config.Load(cname) + co := mustload.Running(ClusterFlagValue()) - if err != nil && !config.IsNotExist(err) { - exit.WithError("Error loading profile config", err) - } - - if err != nil { - out.ErrT(out.Meh, `"{{.name}}" profile does not exist`, out.V{"name": cname}) - os.Exit(1) - } - - glog.Infof("config: %+v", cc) - - for _, n := range cc.Nodes { - host, err := machine.LoadHost(api, driver.MachineName(*cc, n)) + for _, n := range co.Config.Nodes { + host, err := machine.LoadHost(co.API, driver.MachineName(*co.Config, n)) if err != nil { exit.WithError("Error getting host", err) } @@ -76,7 +58,7 @@ func runPause(cmd *cobra.Command, args []string) { exit.WithError("Failed to get command runner", err) } - cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: r}) + cr, err := cruntime.New(cruntime.Config{Type: co.Config.KubernetesConfig.ContainerRuntime, Runner: r}) if err != nil { exit.WithError("Failed runtime", err) } diff --git a/cmd/minikube/cmd/podman-env.go b/cmd/minikube/cmd/podman-env.go index f68191f539..c55ba5f5f0 
100644 --- a/cmd/minikube/cmd/podman-env.go +++ b/cmd/minikube/cmd/podman-env.go @@ -27,16 +27,13 @@ import ( "strings" "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/host" "github.com/docker/machine/libmachine/ssh" - "github.com/docker/machine/libmachine/state" "github.com/spf13/cobra" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/shell" ) @@ -67,15 +64,16 @@ func podmanShellCfgSet(ec PodmanEnvConfig, envMap map[string]string) *PodmanShel } // isPodmanAvailable checks if Podman is available -func isPodmanAvailable(host *host.Host) (bool, error) { - // we need both "varlink bridge" and "podman varlink" - if _, err := host.RunSSHCommand("which varlink"); err != nil { - return false, err +func isPodmanAvailable(r command.Runner) bool { + if _, err := r.RunCmd(exec.Command("which", "varlink")); err != nil { + return false } - if _, err := host.RunSSHCommand("which podman"); err != nil { - return false, err + + if _, err := r.RunCmd(exec.Command("which", "podman")); err != nil { + return false } - return true, nil + + return true } func createExternalSSHClient(d drivers.Driver) (*ssh.ExternalClient, error) { @@ -108,75 +106,49 @@ var podmanEnvCmd = &cobra.Command{ Short: "Sets up podman env variables; similar to '$(podman-machine env)'", Long: `Sets up podman env variables; similar to '$(podman-machine env)'.`, Run: func(cmd *cobra.Command, args []string) { - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) + cname := ClusterFlagValue() + co := mustload.Running(cname) + driverName := co.CPHost.DriverName + + if driverName == driver.None { + exit.UsageT(`'none' 
driver does not support 'minikube podman-env' command`) } - defer api.Close() - profile := viper.GetString(config.ProfileName) - cc, err := config.Load(profile) - if err != nil { - exit.WithError("Error getting config", err) + if ok := isPodmanAvailable(co.CPRunner); !ok { + exit.WithCodeT(exit.Unavailable, `The podman service within '{{.cluster}}' is not active`, out.V{"cluster": cname}) } - for _, n := range cc.Nodes { - machineName := driver.MachineName(*cc, n) - host, err := machine.LoadHost(api, machineName) + + client, err := createExternalSSHClient(co.CPHost.Driver) + if err != nil { + exit.WithError("Error getting ssh client", err) + } + + sh := shell.EnvConfig{ + Shell: shell.ForceShell, + } + ec := PodmanEnvConfig{ + EnvConfig: sh, + profile: cname, + driver: driverName, + client: client, + } + + if ec.Shell == "" { + ec.Shell, err = shell.Detect() if err != nil { - exit.WithError("Error getting host", err) - } - if host.Driver.DriverName() == driver.None { - exit.UsageT(`'none' driver does not support 'minikube podman-env' command`) + exit.WithError("Error detecting shell", err) } + } - hostSt, err := machine.Status(api, machineName) - if err != nil { - exit.WithError("Error getting host status", err) - } - if hostSt != state.Running.String() { - exit.WithCodeT(exit.Unavailable, `'{{.profile}}' is not running`, out.V{"profile": profile}) - } - ok, err := isPodmanAvailable(host) - if err != nil { - exit.WithError("Error getting service status", err) + if podmanUnset { + if err := podmanUnsetScript(ec, os.Stdout); err != nil { + exit.WithError("Error generating unset output", err) } + return + } - if !ok { - exit.WithCodeT(exit.Unavailable, `The podman service within '{{.profile}}' is not active`, out.V{"profile": profile}) - } - - client, err := createExternalSSHClient(host.Driver) - if err != nil { - exit.WithError("Error getting ssh client", err) - } - - sh := shell.EnvConfig{ - Shell: shell.ForceShell, - } - ec := PodmanEnvConfig{ - EnvConfig: sh, - 
profile: profile, - driver: host.DriverName, - client: client, - } - - if ec.Shell == "" { - ec.Shell, err = shell.Detect() - if err != nil { - exit.WithError("Error detecting shell", err) - } - } - - if podmanUnset { - if err := podmanUnsetScript(ec, os.Stdout); err != nil { - exit.WithError("Error generating unset output", err) - } - return - } - - if err := podmanSetScript(ec, os.Stdout); err != nil { - exit.WithError("Error generating set output", err) - } + if err := podmanSetScript(ec, os.Stdout); err != nil { + exit.WithError("Error generating set output", err) } }, } diff --git a/cmd/minikube/cmd/service.go b/cmd/minikube/cmd/service.go index 08c0fb87d5..9c673790be 100644 --- a/cmd/minikube/cmd/service.go +++ b/cmd/minikube/cmd/service.go @@ -32,15 +32,11 @@ import ( "github.com/golang/glog" "github.com/pkg/browser" "github.com/spf13/cobra" - "github.com/spf13/viper" "k8s.io/minikube/pkg/drivers/kic/oci" - "k8s.io/minikube/pkg/minikube/config" - pkg_config "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/service" "k8s.io/minikube/pkg/minikube/tunnel/kic" @@ -78,32 +74,16 @@ var serviceCmd = &cobra.Command{ } svc := args[0] - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() - profileName := viper.GetString(pkg_config.ProfileName) - cfg, err := config.Load(profileName) - if err != nil { - exit.WithError("Error getting config", err) - } - cp, err := config.PrimaryControlPlane(cfg) - if err != nil { - exit.WithError("Error getting control plane", err) - } - machineName := driver.MachineName(*cfg, cp) - if !machine.IsRunning(api, machineName) { - os.Exit(1) - } + cname := ClusterFlagValue() + co := mustload.Healthy(cname) - if runtime.GOOS 
== "darwin" && cfg.Driver == oci.Docker { - startKicServiceTunnel(svc, cfg.Name) + if runtime.GOOS == "darwin" && co.Config.Driver == oci.Docker { + startKicServiceTunnel(svc, cname) return } - urls, err := service.WaitForService(api, namespace, svc, serviceURLTemplate, serviceURLMode, https, wait, interval) + urls, err := service.WaitForService(co.API, namespace, svc, serviceURLTemplate, serviceURLMode, https, wait, interval) if err != nil { var s *service.SVCNotFoundError if errors.As(err, &s) { diff --git a/cmd/minikube/cmd/service_list.go b/cmd/minikube/cmd/service_list.go index f22aa71d23..8932c75fea 100644 --- a/cmd/minikube/cmd/service_list.go +++ b/cmd/minikube/cmd/service_list.go @@ -22,14 +22,10 @@ import ( "strings" "github.com/spf13/cobra" - "github.com/spf13/viper" core "k8s.io/api/core/v1" "k8s.io/minikube/pkg/drivers/kic/oci" - "k8s.io/minikube/pkg/minikube/config" - pkg_config "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/service" ) @@ -42,24 +38,9 @@ var serviceListCmd = &cobra.Command{ Short: "Lists the URLs for the services in your local cluster", Long: `Lists the URLs for the services in your local cluster`, Run: func(cmd *cobra.Command, args []string) { - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() - profileName := viper.GetString(pkg_config.ProfileName) - cfg, err := config.Load(profileName) - if err != nil { - exit.WithError("Error getting config", err) - } - cp, err := config.PrimaryControlPlane(cfg) - if err != nil { - exit.WithError("Error getting primary control plane", err) - } - if !machine.IsRunning(api, driver.MachineName(*cfg, cp)) { - exit.WithCodeT(exit.Unavailable, "profile {{.name}} is not running.", out.V{"name": profileName}) - } - 
serviceURLs, err := service.GetServiceURLs(api, serviceListNamespace, serviceURLTemplate) + co := mustload.Healthy(ClusterFlagValue()) + + serviceURLs, err := service.GetServiceURLs(co.API, serviceListNamespace, serviceURLTemplate) if err != nil { out.FatalT("Failed to get service URL: {{.error}}", out.V{"error": err}) out.ErrT(out.Notice, "Check that minikube is running and that you have specified the correct namespace (-n flag) if required.") @@ -74,7 +55,7 @@ var serviceListCmd = &cobra.Command{ serviceURLs := strings.Join(serviceURL.URLs, "\n") // if we are running Docker on OSX we empty the internal service URLs - if runtime.GOOS == "darwin" && cfg.Driver == oci.Docker { + if runtime.GOOS == "darwin" && co.Config.Driver == oci.Docker { serviceURLs = "" } diff --git a/cmd/minikube/cmd/ssh-key.go b/cmd/minikube/cmd/ssh-key.go index 61d2c441a8..b7a0ddf0de 100644 --- a/cmd/minikube/cmd/ssh-key.go +++ b/cmd/minikube/cmd/ssh-key.go @@ -20,10 +20,8 @@ import ( "path/filepath" "github.com/spf13/cobra" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" ) @@ -33,10 +31,7 @@ var sshKeyCmd = &cobra.Command{ Short: "Retrieve the ssh identity key path of the specified cluster", Long: "Retrieve the ssh identity key path of the specified cluster.", Run: func(cmd *cobra.Command, args []string) { - cc, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil { - exit.WithError("Getting machine config failed", err) - } + _, cc := mustload.Partial(ClusterFlagValue()) out.Ln(filepath.Join(localpath.MiniPath(), "machines", cc.Name, "id_rsa")) }, } diff --git a/cmd/minikube/cmd/ssh.go b/cmd/minikube/cmd/ssh.go index 4a8508ec5d..a18ae297de 100644 --- a/cmd/minikube/cmd/ssh.go +++ b/cmd/minikube/cmd/ssh.go @@ -21,12 +21,12 @@ import ( "github.com/docker/machine/libmachine/ssh" "github.com/spf13/cobra" 
- "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" ) @@ -41,42 +41,30 @@ var sshCmd = &cobra.Command{ Short: "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'", Long: "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.", Run: func(cmd *cobra.Command, args []string) { - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() - cc, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil { - exit.WithError("Error getting config", err) + cname := ClusterFlagValue() + co := mustload.Running(cname) + if co.CPHost.DriverName == driver.None { + exit.UsageT("'none' driver does not support 'minikube ssh' command") } + + var err error var n *config.Node if nodeName == "" { - cp, err := config.PrimaryControlPlane(cc) - if err != nil { - exit.WithError("Getting primary control plane", err) - } - n = &cp + n = co.CPNode } else { - n, _, err = node.Retrieve(cc, nodeName) + n, _, err = node.Retrieve(co.Config, nodeName) if err != nil { exit.WithCodeT(exit.Unavailable, "Node {{.nodeName}} does not exist.", out.V{"nodeName": nodeName}) } } - host, err := machine.LoadHost(api, driver.MachineName(*cc, *n)) - if err != nil { - exit.WithError("Error getting host", err) - } - if host.Driver.DriverName() == driver.None { - exit.UsageT("'none' driver does not support 'minikube ssh' command") - } + if nativeSSHClient { ssh.SetDefaultClient(ssh.Native) } else { ssh.SetDefaultClient(ssh.External) } - err = machine.CreateSSHShell(api, *cc, *n, args) + err = machine.CreateSSHShell(co.API, *co.Config, *n, args) if err != nil { // This is typically due to a non-zero exit code, so no need for flourish. 
out.ErrLn("ssh: %v", err) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 559407c04f..c5b3610d20 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -291,7 +291,7 @@ func runStart(cmd *cobra.Command, args []string) { registryMirror = viper.GetStringSlice("registry_mirror") } - existing, err := config.Load(viper.GetString(config.ProfileName)) + existing, err := config.Load(ClusterFlagValue()) if err != nil && !config.IsNotExist(err) { exit.WithCodeT(exit.Data, "Unable to load config: {{.error}}", out.V{"error": err}) } @@ -390,8 +390,8 @@ func updateDriver(driverName string) { func displayVersion(version string) { prefix := "" - if viper.GetString(config.ProfileName) != constants.DefaultClusterName { - prefix = fmt.Sprintf("[%s] ", viper.GetString(config.ProfileName)) + if ClusterFlagValue() != constants.DefaultClusterName { + prefix = fmt.Sprintf("[%s] ", ClusterFlagValue()) } versionState := out.Happy @@ -653,14 +653,6 @@ func selectImageRepository(mirrorCountry string, v semver.Version) (bool, string return false, fallback, nil } -// Return a minikube command containing the current profile name -func minikubeCmd() string { - if viper.GetString(config.ProfileName) != constants.DefaultClusterName { - return fmt.Sprintf("minikube -p %s", config.ProfileName) - } - return "minikube" -} - // validateUser validates minikube is run by the recommended user (privileged or regular) func validateUser(drvName string) { u, err := user.Current() @@ -686,7 +678,7 @@ func validateUser(drvName string) { if !useForce { os.Exit(exit.Permissions) } - _, err = config.Load(viper.GetString(config.ProfileName)) + _, err = config.Load(ClusterFlagValue()) if err == nil || !config.IsNotExist(err) { out.T(out.Tip, "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete", out.V{"cmd": minikubeCmd()}) } @@ -811,7 +803,7 @@ func validateFlags(cmd *cobra.Command, drvName string) { } if driver.BareMetal(drvName) { - if 
viper.GetString(config.ProfileName) != constants.DefaultClusterName { + if ClusterFlagValue() != constants.DefaultClusterName { exit.WithCodeT(exit.Config, "The '{{.name}} driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/", out.V{"name": drvName}) } @@ -938,7 +930,7 @@ func createNode(cmd *cobra.Command, k8sVersion, kubeNodeName, drvName, repositor } cfg := config.ClusterConfig{ - Name: viper.GetString(config.ProfileName), + Name: ClusterFlagValue(), KeepContext: viper.GetBool(keepContext), EmbedCerts: viper.GetBool(embedCerts), MinikubeISO: viper.GetString(isoURL), @@ -971,7 +963,7 @@ func createNode(cmd *cobra.Command, k8sVersion, kubeNodeName, drvName, repositor NatNicType: viper.GetString(natNicType), KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: k8sVersion, - ClusterName: viper.GetString(config.ProfileName), + ClusterName: ClusterFlagValue(), APIServerName: viper.GetString(apiServerName), APIServerNames: apiServerNames, APIServerIPs: apiServerIPs, diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index 44b96bc8e2..965ab56761 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -29,16 +29,14 @@ import ( "github.com/golang/glog" "github.com/pkg/errors" "github.com/spf13/cobra" - "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify" "k8s.io/minikube/pkg/minikube/cluster" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/machine" - "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/mustload" ) var statusFormat string @@ -101,24 +99,13 @@ var statusCmd = &cobra.Command{ exit.UsageT("Cannot use both --output and --format options") } - api, err := machine.NewAPIClient() - if err != nil { - exit.WithCodeT(exit.Unavailable, "Error getting 
client: {{.error}}", out.V{"error": err}) - } - defer api.Close() - - cc, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil { - if config.IsNotExist(err) { - exit.WithCodeT(exitCode(&Status{}), `The "{{.name}}" cluster does not exist!`, out.V{"name": viper.GetString(config.ProfileName)}) - } - exit.WithError("getting config", err) - } + cname := ClusterFlagValue() + api, cc := mustload.Partial(cname) var st *Status for _, n := range cc.Nodes { machineName := driver.MachineName(*cc, n) - st, err = status(api, machineName, n.ControlPlane) + st, err := status(api, machineName, n.ControlPlane) if err != nil { glog.Errorf("status error: %v", err) } diff --git a/cmd/minikube/cmd/stop.go b/cmd/minikube/cmd/stop.go index 005284a022..6d79b0446f 100644 --- a/cmd/minikube/cmd/stop.go +++ b/cmd/minikube/cmd/stop.go @@ -24,13 +24,12 @@ import ( "github.com/golang/glog" "github.com/pkg/errors" "github.com/spf13/cobra" - "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/config" - pkg_config "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/util/retry" ) @@ -46,17 +45,10 @@ itself, leaving all files intact. 
The cluster can be started again with the "sta // runStop handles the executes the flow of "minikube stop" func runStop(cmd *cobra.Command, args []string) { - profile := viper.GetString(pkg_config.ProfileName) - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() + cname := ClusterFlagValue() - cc, err := config.Load(profile) - if err != nil { - exit.WithError("Error getting cluster config", err) - } + api, cc := mustload.Partial(cname) + defer api.Close() for _, n := range cc.Nodes { nonexistent := stop(api, *cc, n) @@ -70,8 +62,7 @@ func runStop(cmd *cobra.Command, args []string) { out.T(out.Warning, "Unable to kill mount process: {{.error}}", out.V{"error": err}) } - err = kubeconfig.UnsetCurrentContext(profile, kubeconfig.PathFromEnv()) - if err != nil { + if err := kubeconfig.UnsetCurrentContext(cname, kubeconfig.PathFromEnv()); err != nil { exit.WithError("update config", err) } } diff --git a/cmd/minikube/cmd/tunnel.go b/cmd/minikube/cmd/tunnel.go index 493fc98797..dbf66cf110 100644 --- a/cmd/minikube/cmd/tunnel.go +++ b/cmd/minikube/cmd/tunnel.go @@ -27,13 +27,12 @@ import ( "github.com/golang/glog" "github.com/spf13/cobra" - "github.com/spf13/viper" "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/service" "k8s.io/minikube/pkg/minikube/tunnel" "k8s.io/minikube/pkg/minikube/tunnel/kic" @@ -51,6 +50,8 @@ var tunnelCmd = &cobra.Command{ }, Run: func(cmd *cobra.Command, args []string) { manager := tunnel.NewManager() + cname := ClusterFlagValue() + co := mustload.Healthy(cname) if cleanup { glog.Info("Checking for tunnels to cleanup...") @@ -60,13 +61,6 @@ var tunnelCmd = &cobra.Command{ return } - glog.Infof("Creating docker machine client...") - api, err := 
machine.NewAPIClient() - if err != nil { - exit.WithError("error creating machine client", err) - } - glog.Infof("Creating k8s client...") - // Tunnel uses the k8s clientset to query the API server for services in the LoadBalancerEmulator. // We define the tunnel and minikube error free if the API server responds within a second. // This also contributes to better UX, the tunnel status check can happen every second and @@ -76,11 +70,6 @@ var tunnelCmd = &cobra.Command{ exit.WithError("error creating clientset", err) } - cfg, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil { - exit.WithError("Error getting config", err) - } - ctrlC := make(chan os.Signal, 1) signal.Notify(ctrlC, os.Interrupt) ctx, cancel := context.WithCancel(context.Background()) @@ -89,13 +78,13 @@ var tunnelCmd = &cobra.Command{ cancel() }() - if runtime.GOOS == "darwin" && cfg.Driver == oci.Docker { - port, err := oci.ForwardedPort(oci.Docker, cfg.Name, 22) + if runtime.GOOS == "darwin" && co.Config.Driver == oci.Docker { + port, err := oci.ForwardedPort(oci.Docker, cname, 22) if err != nil { exit.WithError("error getting ssh port", err) } sshPort := strconv.Itoa(port) - sshKey := filepath.Join(localpath.MiniPath(), "machines", cfg.Name, "id_rsa") + sshKey := filepath.Join(localpath.MiniPath(), "machines", cname, "id_rsa") kicSSHTunnel := kic.NewSSHTunnel(ctx, sshPort, sshKey, clientset.CoreV1()) err = kicSSHTunnel.Start() @@ -106,7 +95,7 @@ var tunnelCmd = &cobra.Command{ return } - done, err := manager.StartTunnel(ctx, cfg.Name, api, config.DefaultLoader, clientset.CoreV1()) + done, err := manager.StartTunnel(ctx, cname, co.API, config.DefaultLoader, clientset.CoreV1()) if err != nil { exit.WithError("error starting tunnel", err) } diff --git a/cmd/minikube/cmd/unpause.go b/cmd/minikube/cmd/unpause.go index 7549f1b008..d27801fc23 100644 --- a/cmd/minikube/cmd/unpause.go +++ b/cmd/minikube/cmd/unpause.go @@ -17,7 +17,6 @@ limitations under the License. 
package cmd import ( - "os" "strings" "github.com/golang/glog" @@ -25,11 +24,11 @@ import ( "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/cluster" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" ) @@ -38,27 +37,12 @@ var unpauseCmd = &cobra.Command{ Use: "unpause", Short: "unpause Kubernetes", Run: func(cmd *cobra.Command, args []string) { - cname := viper.GetString(config.ProfileName) - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() - cc, err := config.Load(cname) + cname := ClusterFlagValue() + co := mustload.Running(cname) - if err != nil && !config.IsNotExist(err) { - exit.WithError("Error loading profile config", err) - } - - if err != nil { - out.ErrT(out.Meh, `"{{.name}}" profile does not exist`, out.V{"name": cname}) - os.Exit(1) - } - glog.Infof("config: %+v", cc) - - for _, n := range cc.Nodes { - machineName := driver.MachineName(*cc, n) - host, err := machine.LoadHost(api, machineName) + for _, n := range co.Config.Nodes { + machineName := driver.MachineName(*co.Config, n) + host, err := machine.LoadHost(co.API, machineName) if err != nil { exit.WithError("Error getting host", err) } @@ -68,7 +52,7 @@ var unpauseCmd = &cobra.Command{ exit.WithError("Failed to get command runner", err) } - cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: r}) + cr, err := cruntime.New(cruntime.Config{Type: co.Config.KubernetesConfig.ContainerRuntime, Runner: r}) if err != nil { exit.WithError("Failed runtime", err) } diff --git a/cmd/minikube/cmd/update-context.go b/cmd/minikube/cmd/update-context.go index 2532672bbc..39a76e5929 100644 --- a/cmd/minikube/cmd/update-context.go +++ b/cmd/minikube/cmd/update-context.go @@ -18,12 
+18,9 @@ package cmd import ( "github.com/spf13/cobra" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/cluster" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/kubeconfig" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" ) @@ -34,24 +31,16 @@ var updateContextCmd = &cobra.Command{ Long: `Retrieves the IP address of the running cluster, checks it with IP in kubeconfig, and corrects kubeconfig if incorrect.`, Run: func(cmd *cobra.Command, args []string) { - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() - machineName := viper.GetString(config.ProfileName) - ip, err := cluster.GetHostDriverIP(api, machineName) - if err != nil { - exit.WithError("Error host driver ip status", err) - } - updated, err := kubeconfig.UpdateIP(ip, machineName, kubeconfig.PathFromEnv()) + cname := ClusterFlagValue() + co := mustload.Running(cname) + updated, err := kubeconfig.UpdateIP(co.DriverIP, cname, kubeconfig.PathFromEnv()) if err != nil { exit.WithError("update config", err) } if updated { - out.T(out.Celebrate, "{{.machine}} IP has been updated to point at {{.ip}}", out.V{"machine": machineName, "ip": ip}) + out.T(out.Celebrate, "{{.cluster}} IP has been updated to point at {{.ip}}", out.V{"cluster": cname, "ip": co.DriverIP}) } else { - out.T(out.Meh, "{{.machine}} IP was already correctly configured for {{.ip}}", out.V{"machine": machineName, "ip": ip}) + out.T(out.Meh, "{{.cluster}} IP was already correctly configured for {{.ip}}", out.V{"cluster": cname, "ip": co.DriverIP}) } }, diff --git a/pkg/minikube/mustload/mustload.go b/pkg/minikube/mustload/mustload.go new file mode 100644 index 0000000000..f7d25b6924 --- /dev/null +++ b/pkg/minikube/mustload/mustload.go @@ -0,0 +1,166 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// mustload loads minikube clusters, exiting with user-friendly messages +package mustload + +import ( + "fmt" + "net" + "os" + + "github.com/docker/machine/libmachine" + "github.com/docker/machine/libmachine/host" + "github.com/docker/machine/libmachine/state" + "k8s.io/minikube/pkg/drivers/kic/oci" + "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify" + "k8s.io/minikube/pkg/minikube/command" + "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/driver" + "k8s.io/minikube/pkg/minikube/exit" + "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/out" +) + +type ClusterController struct { + Config *config.ClusterConfig + API libmachine.API + CPHost *host.Host + CPNode *config.Node + CPRunner command.Runner + DriverIP net.IP +} + +// Partial is a cmd-friendly way to load a cluster which may or may not be running +func Partial(name string) (libmachine.API, *config.ClusterConfig) { + api, err := machine.NewAPIClient() + if err != nil { + exit.WithError("libmachine failed", err) + } + + cc, err := config.Load(name) + if err != nil { + if config.IsNotExist(err) { + out.T(out.Shrug, `There is no local cluster named "{{.cluster}}"`, out.V{"cluster": name}) + exitTip("start", name, exit.Data) + } + exit.WithError("Error getting cluster config", err) + } + + return api, cc +} + +// Running is a cmd-friendly way to load a running cluster +func Running(name 
string) ClusterController { + api, cc := Partial(name) + + cp, err := config.PrimaryControlPlane(cc) + if err != nil { + exit.WithError("Unable to find control plane", err) + } + + hs, err := machine.Status(api, cp.Name) + if err != nil { + exit.WithError("Unable to get machine status", err) + } + + if hs == state.None.String() { + out.T(out.Shrug, `The control plane node "{{.name}}" does not exist.`, out.V{"name": cp.Name}) + exitTip("start", name, exit.Unavailable) + } + + if hs == state.Stopped.String() { + out.T(out.Shrug, `The control plane node must be running for this command`) + exitTip("start", name, exit.Unavailable) + } + + if hs != state.Running.String() { + out.T(out.Shrug, `The control plane node is not running (state={{.state}})`, out.V{"name": cp.Name, "state": hs}) + exitTip("start", name, exit.Unavailable) + } + + host, err := machine.LoadHost(api, name) + if err != nil { + exit.WithError("Unable to load host", err) + } + + cr, err := machine.CommandRunner(host) + if err != nil { + exit.WithError("Unable to get command runner", err) + } + + ips, err := host.Driver.GetIP() + if err != nil { + exit.WithError("Unable to get driver IP", err) + } + + if driver.IsKIC(host.DriverName) { + ips = oci.DefaultBindIPV4 + } + + ip := net.ParseIP(ips) + if ip == nil { + exit.WithCodeT(exit.Software, fmt.Sprintf("Unable to parse driver IP: %q", ips)) + } + + return ClusterController{ + API: api, + Config: cc, + CPRunner: cr, + CPHost: host, + CPNode: &cp, + DriverIP: ip, + } +} + +// Healthy is a cmd-friendly way to load a healthy cluster +func Healthy(name string) ClusterController { + co := Running(name) + + as, err := kverify.APIServerStatus(co.CPRunner, net.ParseIP(co.CPNode.IP), co.CPNode.Port) + if err != nil { + out.T(out.FailureType, `Unable to get control plane status: {{.error}}`, out.V{"error": err}) + exitTip("delete", name, exit.Unavailable) + } + + if as == state.Paused { + out.T(out.Shrug, `The control plane for "{{.name}}" is paused!`, 
out.V{"name": name}) + exitTip("unpause", name, exit.Unavailable) + } + + if as != state.Running { + out.T(out.Shrug, `This control plane is not running! (state={{.state}})`, out.V{"state": as.String()}) + out.T(out.Warning, `This is unusual - you may want to investigate using "{{.command}} logs"`, out.V{"command": minikubeCmd(name)}) + exitTip("start", name, exit.Unavailable) + } + return co +} + +// Return a minikube command containing the current profile name +func minikubeCmd(cname string) string { + if cname != constants.DefaultClusterName { + return fmt.Sprintf("minikube -p %s", cname) + } + return "minikube" +} + +// exitTip returns an action tip and exits +func exitTip(action string, profile string, code int) { + command := minikubeCmd(profile) + " " + action + out.T(out.Workaround, "To fix this, run: {{.command}}", out.V{"command": command}) + os.Exit(code) +} diff --git a/pkg/minikube/out/style.go b/pkg/minikube/out/style.go index 8154f46980..150a7af30d 100644 --- a/pkg/minikube/out/style.go +++ b/pkg/minikube/out/style.go @@ -83,6 +83,8 @@ var styles = map[StyleEnum]style{ Sparkle: {Prefix: "✨ "}, Pause: {Prefix: "⏸️ "}, Unpause: {Prefix: "⏯️ "}, + Confused: {Prefix: "😕 "}, + Shrug: {Prefix: "🤷 "}, // Specialized purpose styles ISODownload: {Prefix: "💿 "}, diff --git a/pkg/minikube/out/style_enum.go b/pkg/minikube/out/style_enum.go index d5937e2383..747c277faf 100644 --- a/pkg/minikube/out/style_enum.go +++ b/pkg/minikube/out/style_enum.go @@ -61,6 +61,7 @@ const ( DeletingHost Copying Connectivity + Confused Internet Mounting Celebrate @@ -89,4 +90,5 @@ const ( DryRun AddonEnable AddonDisable + Shrug ) From 77f97a58f470802f0e66497d92e401f1aa29f7f2 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Mon, 23 Mar 2020 13:16:37 -0700 Subject: [PATCH 168/668] Fix uninitalized variable --- cmd/minikube/cmd/status.go | 7 ++++++- pkg/minikube/mustload/mustload.go | 2 ++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/cmd/minikube/cmd/status.go 
b/cmd/minikube/cmd/status.go index 965ab56761..0db9e57ea8 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -103,9 +103,13 @@ var statusCmd = &cobra.Command{ api, cc := mustload.Partial(cname) var st *Status + var err error for _, n := range cc.Nodes { + glog.Infof("checking status of %s ...", n.Name) machineName := driver.MachineName(*cc, n) - st, err := status(api, machineName, n.ControlPlane) + st, err = status(api, machineName, n.ControlPlane) + glog.Infof("%s status: %+v", machineName, st) + if err != nil { glog.Errorf("status error: %v", err) } @@ -127,6 +131,7 @@ var statusCmd = &cobra.Command{ } } + // TODO: Update for multi-node os.Exit(exitCode(st)) }, } diff --git a/pkg/minikube/mustload/mustload.go b/pkg/minikube/mustload/mustload.go index f7d25b6924..1cdc8358b8 100644 --- a/pkg/minikube/mustload/mustload.go +++ b/pkg/minikube/mustload/mustload.go @@ -25,6 +25,7 @@ import ( "github.com/docker/machine/libmachine" "github.com/docker/machine/libmachine/host" "github.com/docker/machine/libmachine/state" + "github.com/golang/glog" "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify" "k8s.io/minikube/pkg/minikube/command" @@ -47,6 +48,7 @@ type ClusterController struct { // Partial is a cmd-friendly way to load a cluster which may or may not be running func Partial(name string) (libmachine.API, *config.ClusterConfig) { + glog.Infof("Loading cluster: %s", name) api, err := machine.NewAPIClient() if err != nil { exit.WithError("libmachine failed", err) From b8c7638976349d80828f3ccc7ca475d14d581883 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Mon, 23 Mar 2020 13:17:34 -0700 Subject: [PATCH 169/668] Add flag functions --- cmd/minikube/cmd/config/flags.go | 11 +++++++++++ cmd/minikube/cmd/flags.go | 23 +++++++++++++++++++++++ 2 files changed, 34 insertions(+) create mode 100644 cmd/minikube/cmd/config/flags.go create mode 100644 cmd/minikube/cmd/flags.go diff --git 
a/cmd/minikube/cmd/config/flags.go b/cmd/minikube/cmd/config/flags.go new file mode 100644 index 0000000000..4df6082a40 --- /dev/null +++ b/cmd/minikube/cmd/config/flags.go @@ -0,0 +1,11 @@ +package config + +import ( + "github.com/spf13/viper" + "k8s.io/minikube/pkg/minikube/config" +) + +// ClusterFlagValue returns the current cluster name based on flags +func ClusterFlagValue() string { + return viper.GetString(config.ProfileName) +} diff --git a/cmd/minikube/cmd/flags.go b/cmd/minikube/cmd/flags.go new file mode 100644 index 0000000000..c00bb54286 --- /dev/null +++ b/cmd/minikube/cmd/flags.go @@ -0,0 +1,23 @@ +package cmd + +import ( + "fmt" + + "github.com/spf13/viper" + "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" +) + +// Return a minikube command containing the current profile name +func minikubeCmd() string { + cname := ClusterFlagValue() + if cname != constants.DefaultClusterName { + return fmt.Sprintf("minikube -p %s", cname) + } + return "minikube" +} + +// ClusterFlagValue returns the current cluster name based on flags +func ClusterFlagValue() string { + return viper.GetString(config.ProfileName) +} From 0ce81acbf49f9a56f702f132fb097fd98e15e1c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20F=20Bj=C3=B6rklund?= Date: Mon, 23 Mar 2020 21:49:19 +0100 Subject: [PATCH 170/668] Centralize the handling of browser.OpenURL So we can catch when xdg-open not installed --- cmd/minikube/cmd/config/open.go | 3 +-- cmd/minikube/cmd/dashboard.go | 2 +- cmd/minikube/cmd/service.go | 2 +- pkg/minikube/browser/browser.go | 35 +++++++++++++++++++++++++++++++++ 4 files changed, 38 insertions(+), 4 deletions(-) create mode 100644 pkg/minikube/browser/browser.go diff --git a/cmd/minikube/cmd/config/open.go b/cmd/minikube/cmd/config/open.go index c4c8b5416d..dc4fd0fb7a 100644 --- a/cmd/minikube/cmd/config/open.go +++ b/cmd/minikube/cmd/config/open.go @@ -21,11 +21,10 @@ import ( "os" "text/template" - "github.com/pkg/browser" - 
"github.com/spf13/cobra" "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/assets" + "k8s.io/minikube/pkg/minikube/browser" "k8s.io/minikube/pkg/minikube/config" pkg_config "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" diff --git a/cmd/minikube/cmd/dashboard.go b/cmd/minikube/cmd/dashboard.go index b5fd848eb3..b65c53eb84 100644 --- a/cmd/minikube/cmd/dashboard.go +++ b/cmd/minikube/cmd/dashboard.go @@ -29,12 +29,12 @@ import ( "github.com/docker/machine/libmachine/mcnerror" "github.com/golang/glog" - "github.com/pkg/browser" "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" pkgaddons "k8s.io/minikube/pkg/addons" "k8s.io/minikube/pkg/minikube/assets" + "k8s.io/minikube/pkg/minikube/browser" "k8s.io/minikube/pkg/minikube/config" pkg_config "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" diff --git a/cmd/minikube/cmd/service.go b/cmd/minikube/cmd/service.go index 08c0fb87d5..ae14afda08 100644 --- a/cmd/minikube/cmd/service.go +++ b/cmd/minikube/cmd/service.go @@ -30,11 +30,11 @@ import ( "time" "github.com/golang/glog" - "github.com/pkg/browser" "github.com/spf13/cobra" "github.com/spf13/viper" "k8s.io/minikube/pkg/drivers/kic/oci" + "k8s.io/minikube/pkg/minikube/browser" "k8s.io/minikube/pkg/minikube/config" pkg_config "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" diff --git a/pkg/minikube/browser/browser.go b/pkg/minikube/browser/browser.go new file mode 100644 index 0000000000..dd6a4799db --- /dev/null +++ b/pkg/minikube/browser/browser.go @@ -0,0 +1,35 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package browser + +import ( + "os/exec" + "runtime" + + "github.com/pkg/browser" +) + +// OpenURL opens a new browser window pointing to URL. +func OpenURL(url string) error { + if runtime.GOOS == "linux" { + _, err := exec.LookPath("xdg-open") + if err != nil { + return err + } + } + return browser.OpenURL(url) +} From 54bbdde812e264e9bf31812363a89aa5a6fe4d51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20F=20Bj=C3=B6rklund?= Date: Mon, 23 Mar 2020 22:11:22 +0100 Subject: [PATCH 171/668] Print the URL, instead of return an error Let the terminal user open it themselves --- pkg/minikube/browser/browser.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/minikube/browser/browser.go b/pkg/minikube/browser/browser.go index dd6a4799db..890e068844 100644 --- a/pkg/minikube/browser/browser.go +++ b/pkg/minikube/browser/browser.go @@ -21,6 +21,7 @@ import ( "runtime" "github.com/pkg/browser" + "k8s.io/minikube/pkg/minikube/out" ) // OpenURL opens a new browser window pointing to URL. 
@@ -28,7 +29,8 @@ func OpenURL(url string) error { if runtime.GOOS == "linux" { _, err := exec.LookPath("xdg-open") if err != nil { - return err + out.T(out.URL, url) + return nil } } return browser.OpenURL(url) From 29f5007f4604d802588b5e9b44e0b5d4195b1bf2 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Mon, 23 Mar 2020 14:25:47 -0700 Subject: [PATCH 172/668] Use correct machine name for status check --- pkg/minikube/mustload/mustload.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/minikube/mustload/mustload.go b/pkg/minikube/mustload/mustload.go index 1cdc8358b8..59ab08e212 100644 --- a/pkg/minikube/mustload/mustload.go +++ b/pkg/minikube/mustload/mustload.go @@ -75,7 +75,8 @@ func Running(name string) ClusterController { exit.WithError("Unable to find control plane", err) } - hs, err := machine.Status(api, cp.Name) + machineName := driver.MachineName(*cc, cp) + hs, err := machine.Status(api, machineName) if err != nil { exit.WithError("Unable to get machine status", err) } From b9049288fd9b82538ebef68d3381df9574692051 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Mon, 23 Mar 2020 14:25:59 -0700 Subject: [PATCH 173/668] Improve experience for paused cluster --- cmd/minikube/cmd/logs.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cmd/minikube/cmd/logs.go b/cmd/minikube/cmd/logs.go index 098ab0d99f..f2a1fba453 100644 --- a/cmd/minikube/cmd/logs.go +++ b/cmd/minikube/cmd/logs.go @@ -17,6 +17,8 @@ limitations under the License. 
package cmd import ( + "os" + "github.com/spf13/cobra" "github.com/spf13/viper" cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" @@ -25,6 +27,7 @@ import ( "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/logs" "k8s.io/minikube/pkg/minikube/mustload" + "k8s.io/minikube/pkg/minikube/out" ) const ( @@ -73,7 +76,10 @@ var logsCmd = &cobra.Command{ } err = logs.Output(cr, bs, *co.Config, co.CPRunner, numberOfLines) if err != nil { - exit.WithError("Error getting machine logs", err) + out.Ln("") + // Avoid exit.WithError, since it outputs the issue URL + out.T(out.Warning, "{{.error}}", out.V{"error": err}) + os.Exit(exit.Unavailable) } }, } From 81393a0bc39d688124be3746ea3ed14d50d3cab4 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Mon, 23 Mar 2020 14:39:49 -0700 Subject: [PATCH 174/668] Add boilerplate --- cmd/minikube/cmd/config/flags.go | 16 ++++++++++++++++ cmd/minikube/cmd/flags.go | 16 ++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/cmd/minikube/cmd/config/flags.go b/cmd/minikube/cmd/config/flags.go index 4df6082a40..5a978ab08b 100644 --- a/cmd/minikube/cmd/config/flags.go +++ b/cmd/minikube/cmd/config/flags.go @@ -1,3 +1,19 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package config import ( diff --git a/cmd/minikube/cmd/flags.go b/cmd/minikube/cmd/flags.go index c00bb54286..6911e9a7c7 100644 --- a/cmd/minikube/cmd/flags.go +++ b/cmd/minikube/cmd/flags.go @@ -1,3 +1,19 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package cmd import ( From 74380b0eda5e290ab78aa1b53ff87cead82db0e6 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Mon, 23 Mar 2020 14:40:02 -0700 Subject: [PATCH 175/668] Fix output of ssh command test The /r isn't actually returned. 
--- test/integration/functional_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index 8445aa364a..ec4ce69051 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -698,7 +698,7 @@ func validateSSHCmd(ctx context.Context, t *testing.T, profile string) { if NoneDriver() { t.Skipf("skipping: ssh unsupported by none") } - want := "hello\r\n" + want := "hello\n" rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("echo hello"))) if err != nil { t.Errorf("%s failed: %v", rr.Args, err) From 80bd7b1532cf8ba91e81454731a7a82c18f95661 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20F=20Bj=C3=B6rklund?= Date: Mon, 23 Mar 2020 22:41:06 +0100 Subject: [PATCH 176/668] Upgrade machine to machine-drivers/master 0.16.2 --- go.mod | 4 ++-- go.sum | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index a703f57861..3f4a02bf9f 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/docker/cli v0.0.0-20200303162255-7d407207c304 // indirect github.com/docker/docker v1.13.1 github.com/docker/go-units v0.4.0 - github.com/docker/machine v0.7.1-0.20190718054102-a555e4f7a8f5 // version is 0.7.1 to pin to a555e4f7a8f5 + github.com/docker/machine v0.7.1-0.20190902101342-b170508bf44c // v0.16.2^ github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f github.com/elazarl/goproxy/ext v0.0.0-20190421051319-9d40249d3c2f // indirect github.com/evanphx/json-patch v4.5.0+incompatible // indirect @@ -95,7 +95,7 @@ require ( replace ( git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110319-2566ecd5d999 github.com/docker/docker => github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7 - github.com/docker/machine => github.com/medyagh/machine v0.16.4 + github.com/docker/machine => github.com/machine-drivers/machine 
v0.7.1-0.20200323212942-41eb826190d8 github.com/hashicorp/go-getter => github.com/afbjorklund/go-getter v1.4.1-0.20190910175809-eb9f6c26742c github.com/samalba/dockerclient => github.com/sayboras/dockerclient v0.0.0-20191231050035-015626177a97 k8s.io/api => k8s.io/api v0.17.3 diff --git a/go.sum b/go.sum index 8a4fe67c98..29d0ae43c8 100644 --- a/go.sum +++ b/go.sum @@ -178,6 +178,8 @@ github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= +github.com/docker/machine v0.7.1-0.20190902101342-b170508bf44c h1:/CqsDCJqQY+xBxJ65ri4ZGw6jXugauXisH3roam3Ics= +github.com/docker/machine v0.7.1-0.20190902101342-b170508bf44c/go.mod h1:I8mPNDeK1uH+JTcUU7X0ZW8KiYz0jyAgNaeSJ1rCfDI= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -486,6 +488,8 @@ github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H7 github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= github.com/machine-drivers/docker-machine-driver-vmware v0.1.1 h1:+E1IKKk+6kaQrCPg6edJZ/zISZijuZTPnzy6RE4C/Ho= github.com/machine-drivers/docker-machine-driver-vmware v0.1.1/go.mod h1:ej014C83EmSnxJeJ8PtVb8OLJ91PJKO1Q8Y7sM5CK0o= +github.com/machine-drivers/machine v0.7.1-0.20200323212942-41eb826190d8 h1:CIddS19fAKG4rUkZAotX0WPQtx/v/SdLhhDU3MVhLy0= +github.com/machine-drivers/machine 
v0.7.1-0.20200323212942-41eb826190d8/go.mod h1:79Uwa2hGd5S39LDJt58s8JZcIhGEK6pkq9bsuTbFWbk= github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -517,8 +521,6 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2 h1:g+4J5sZg6osfvEfkRZxJ1em0VT95/UOZgi/l7zi1/oE= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= -github.com/medyagh/machine v0.16.4 h1:oEsH3C1TYzs5axakAI/K1yc5O3r6de0+mCGumX4aHwM= -github.com/medyagh/machine v0.16.4/go.mod h1:/HegrAvHvD0AGQYQaLfrmUqxQTQF3Ks9qkj34p/ZH40= github.com/mesos/mesos-go v0.0.9/go.mod h1:kPYCMQ9gsOXVAle1OsoY4I1+9kPu8GHkf88aV59fDr4= github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= From 6f50b27a2045f90f41f31bb0bd06a421435095a4 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 23 Mar 2020 15:38:17 -0700 Subject: [PATCH 177/668] install conntrack for none tests --- .github/workflows/main.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index a2daefe32d..a6146b5b5a 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -88,7 +88,7 @@ jobs: docker info || true docker version || true docker ps || true - - name: install lz4 + - name: Install lz4 shell: bash run: | sudo apt-get update -qq @@ -228,11 +228,11 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: 
ubuntu-16.04 steps: - - name: install lz4 + - name: Install conntrack shell: bash run: | sudo apt-get update -qq - sudo apt-get -qq -y install liblz4-tool + sudo apt-get -qq -y install conntrack - name: Install gopogh shell: bash run: | @@ -295,11 +295,11 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-18.04 steps: - - name: install lz4 + - name: Install conntrack shell: bash run: | sudo apt-get update -qq - sudo apt-get -qq -y install liblz4-tool + sudo apt-get -qq -y install conntrack - name: Install gopogh shell: bash run: | From 31dec90cc890faaad2b5db7d7075fa331f9c5fa4 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 23 Mar 2020 22:45:22 +0000 Subject: [PATCH 178/668] formatting --- .github/workflows/main.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index a2daefe32d..92259e9e85 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -88,7 +88,7 @@ jobs: docker info || true docker version || true docker ps || true - - name: install lz4 + - name: Install lz4 shell: bash run: | sudo apt-get update -qq @@ -155,7 +155,7 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 needs: [build_minikube] steps: - - name: install lz4 + - name: Install lz4 shell: bash run: | sudo apt-get update -qq @@ -228,7 +228,7 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-16.04 steps: - - name: install lz4 + - name: Install lz4 shell: bash run: | sudo apt-get update -qq @@ -295,7 +295,7 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-18.04 steps: - - name: install lz4 + - name: Install lz4 shell: bash run: | sudo apt-get update -qq @@ -362,12 +362,12 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: 
ubuntu-18.04 steps: - - name: install lz4 + - name: Install lz4 shell: bash run: | sudo apt-get update -qq sudo apt-get -qq -y install liblz4-tool - - name: install podman + - name: Install podman shell: bash run: | . /etc/os-release From 61c3e2d6a087942fc6cd803f082cfa12b6682258 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 23 Mar 2020 15:46:45 -0700 Subject: [PATCH 179/668] formatting --- .github/workflows/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index a6146b5b5a..3ade2e60d5 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -155,7 +155,7 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 needs: [build_minikube] steps: - - name: install lz4 + - name: Install lz4 shell: bash run: | sudo apt-get update -qq @@ -362,12 +362,12 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-18.04 steps: - - name: install lz4 + - name: Install lz4 shell: bash run: | sudo apt-get update -qq sudo apt-get -qq -y install liblz4-tool - - name: install podman + - name: Install podman shell: bash run: | . 
/etc/os-release From 673341ce35e51fe9aa50a574d1230382b84034d7 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Mon, 23 Mar 2020 15:51:02 -0700 Subject: [PATCH 180/668] Improve hostDriver comment --- cmd/minikube/cmd/start.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 6b39b8955f..c349f5fa3f 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -520,7 +520,7 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState { return pick } -// hostDriver returns the true driver used without relying on config fields +// hostDriver returns the actual driver used by a libmachine host, which can differ from our config func hostDriver(existing *config.ClusterConfig) string { if existing == nil { return "" From b99e9436846884f1073640bbc7bec47636404152 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Mon, 23 Mar 2020 16:00:53 -0700 Subject: [PATCH 181/668] Improve minikubeCmd, promote to public function --- cmd/minikube/cmd/flags.go | 12 ------------ cmd/minikube/cmd/start.go | 15 +++++---------- pkg/minikube/mustload/mustload.go | 10 +++++----- 3 files changed, 10 insertions(+), 27 deletions(-) diff --git a/cmd/minikube/cmd/flags.go b/cmd/minikube/cmd/flags.go index 6911e9a7c7..4c8c0efa0f 100644 --- a/cmd/minikube/cmd/flags.go +++ b/cmd/minikube/cmd/flags.go @@ -17,22 +17,10 @@ limitations under the License. 
package cmd import ( - "fmt" - "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/constants" ) -// Return a minikube command containing the current profile name -func minikubeCmd() string { - cname := ClusterFlagValue() - if cname != constants.DefaultClusterName { - return fmt.Sprintf("minikube -p %s", cname) - } - return "minikube" -} - // ClusterFlagValue returns the current cluster name based on flags func ClusterFlagValue() string { return viper.GetString(config.ProfileName) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index c5b3610d20..ad8c52ad91 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -54,6 +54,7 @@ import ( "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/notify" "k8s.io/minikube/pkg/minikube/out" @@ -550,15 +551,9 @@ func validateSpecifiedDriver(existing *config.ClusterConfig) { if err != nil { exit.WithError("Error getting primary cp", err) } - machineName := driver.MachineName(*existing, cp) - h, err := api.Load(machineName) - if err != nil { - glog.Warningf("selectDriver api.Load: %v", err) - return - } - out.ErrT(out.Conflict, `The existing "{{.profile_name}}" VM was created using the "{{.old_driver}}" driver, and is incompatible with the "{{.driver}}" driver.`, - out.V{"profile_name": machineName, "driver": requested, "old_driver": h.Driver.DriverName()}) + out.ErrT(out.Conflict, `The existing "{{.name}}" ccluster was created using the "{{.old}}" driver, and is incompatible with the "{{.new}}" driver.`, + out.V{"name": existing.Name, "new": requested, "old": h.Driver.DriverName()}) out.ErrT(out.Workaround, `To proceed, either: @@ -566,8 +561,8 @@ func validateSpecifiedDriver(existing *config.ClusterConfig) { * or * -2) Start the existing "{{.profile_name}}" cluster using: 
'{{.command}} start --driver={{.old_driver}}' -`, out.V{"command": minikubeCmd(), "old_driver": h.Driver.DriverName(), "profile_name": machineName}) +2) Start the existing "{{.profile_name}}" cluster using: '{{.command}} --driver={{.old_driver}}' +`, out.V{"command": mustload.ExampleCmd(existing.Name, "start"), "old_driver": h.Driver.DriverName(), "profile_name": cname}) exit.WithCodeT(exit.Config, "Exiting.") } diff --git a/pkg/minikube/mustload/mustload.go b/pkg/minikube/mustload/mustload.go index 59ab08e212..4d6db076dd 100644 --- a/pkg/minikube/mustload/mustload.go +++ b/pkg/minikube/mustload/mustload.go @@ -153,17 +153,17 @@ func Healthy(name string) ClusterController { return co } -// Return a minikube command containing the current profile name -func minikubeCmd(cname string) string { +// ExampleCmd Return a minikube command containing the current profile name +func ExampleCmd(cname string, action string) string { if cname != constants.DefaultClusterName { - return fmt.Sprintf("minikube -p %s", cname) + return fmt.Sprintf("minikube %s -p %s", action, cname) } - return "minikube" + return fmt.Sprintf("minikube %s", action) } // exitTip returns an action tip and exits func exitTip(action string, profile string, code int) { - command := minikubeCmd(profile) + " " + action + command := ExampleCmd(profile, action) out.T(out.Workaround, "To fix this, run: {{.command}}", out.V{"command": command}) os.Exit(code) } From 2a2b3428fdb96bccea17f96e0dc6be7eaa1aa4fd Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 23 Mar 2020 16:42:39 -0700 Subject: [PATCH 182/668] add lz4 back --- .github/workflows/main.yml | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 3ade2e60d5..d03af33653 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -228,10 +228,14 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: 
ubuntu-16.04 steps: - - name: Install conntrack + - name: Install lz4 shell: bash run: | sudo apt-get update -qq + sudo apt-get -qq -y install liblz4-tool + - name: Install conntrack + shell: bash + run: | sudo apt-get -qq -y install conntrack - name: Install gopogh shell: bash @@ -295,10 +299,14 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-18.04 steps: - - name: Install conntrack + - name: Install lz4 shell: bash run: | sudo apt-get update -qq + sudo apt-get -qq -y install liblz4-tool + - name: Install conntrack + shell: bash + run: | sudo apt-get -qq -y install conntrack - name: Install gopogh shell: bash From 0ff8becc6323991db7cd3ea2c1ac967877144050 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 23 Mar 2020 17:09:12 -0700 Subject: [PATCH 183/668] install conntrack everywhere, check for vm-driver in tests --- .github/workflows/main.yml | 24 ++++++++++++++---------- test/integration/main.go | 4 ++-- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index d03af33653..379b82f120 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -93,6 +93,10 @@ jobs: run: | sudo apt-get update -qq sudo apt-get -qq -y install liblz4-tool + - name: Install conntrack + shell: bash + run: | + sudo apt-get -qq -y install conntrack - name: Install gopogh shell: bash run: | @@ -160,6 +164,10 @@ jobs: run: | sudo apt-get update -qq sudo apt-get -qq -y install liblz4-tool + - name: Install conntrack + shell: bash + run: | + sudo apt-get -qq -y install conntrack - name: Docker Info shell: bash run: | @@ -228,14 +236,10 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-16.04 steps: - - name: Install lz4 - shell: bash - run: | - sudo apt-get update -qq - sudo apt-get -qq -y install liblz4-tool - name: Install conntrack shell: bash run: | + sudo apt-get update -qq sudo 
apt-get -qq -y install conntrack - name: Install gopogh shell: bash @@ -299,14 +303,10 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-18.04 steps: - - name: Install lz4 - shell: bash - run: | - sudo apt-get update -qq - sudo apt-get -qq -y install liblz4-tool - name: Install conntrack shell: bash run: | + sudo apt-get update -qq sudo apt-get -qq -y install conntrack - name: Install gopogh shell: bash @@ -375,6 +375,10 @@ jobs: run: | sudo apt-get update -qq sudo apt-get -qq -y install liblz4-tool + - name: Install conntrack + shell: bash + run: | + sudo apt-get -qq -y install conntrack - name: Install podman shell: bash run: | diff --git a/test/integration/main.go b/test/integration/main.go index 3c159a6e42..04c22da305 100644 --- a/test/integration/main.go +++ b/test/integration/main.go @@ -60,12 +60,12 @@ func Target() string { // NoneDriver returns whether or not this test is using the none driver func NoneDriver() bool { - return strings.Contains(*startArgs, "--driver=none") + return strings.Contains(*startArgs, "--driver=none") || strings.Contains(*startArgs, "--vm-driver=none") } // HyperVDriver returns whether or not this test is using the Hyper-V driver func HyperVDriver() bool { - return strings.Contains(*startArgs, "--driver=hyperv") + return strings.Contains(*startArgs, "--driver=hyperv") || strings.Contains(*startArgs, "--vm-driver=hyperv") } // CanCleanup returns if cleanup is allowed From a42d1c40c13c0b130b41442d20730e3d605228d5 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 23 Mar 2020 17:21:44 -0700 Subject: [PATCH 184/668] conntrack only needed for none --- .github/workflows/main.yml | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 379b82f120..1304c3634c 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -93,10 +93,6 @@ jobs: run: | sudo apt-get update -qq sudo 
apt-get -qq -y install liblz4-tool - - name: Install conntrack - shell: bash - run: | - sudo apt-get -qq -y install conntrack - name: Install gopogh shell: bash run: | @@ -164,10 +160,6 @@ jobs: run: | sudo apt-get update -qq sudo apt-get -qq -y install liblz4-tool - - name: Install conntrack - shell: bash - run: | - sudo apt-get -qq -y install conntrack - name: Docker Info shell: bash run: | @@ -236,6 +228,7 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-16.04 steps: + # conntrack is required for kubernetes 1.18 and highter - name: Install conntrack shell: bash run: | @@ -303,6 +296,7 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-18.04 steps: + # conntrack is required for kubernetes 1.18 and highter - name: Install conntrack shell: bash run: | @@ -375,10 +369,6 @@ jobs: run: | sudo apt-get update -qq sudo apt-get -qq -y install liblz4-tool - - name: Install conntrack - shell: bash - run: | - sudo apt-get -qq -y install conntrack - name: Install podman shell: bash run: | From 1273d4f8e756ac5eca89d94c449450fb2da21a5a Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 23 Mar 2020 17:22:47 -0700 Subject: [PATCH 185/668] typo --- .github/workflows/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 1304c3634c..fda40cd7f1 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -228,7 +228,7 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-16.04 steps: - # conntrack is required for kubernetes 1.18 and highter + # conntrack is required for kubernetes 1.18 and higher - name: Install conntrack shell: bash run: | @@ -296,7 +296,7 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-18.04 steps: - # conntrack is required for 
kubernetes 1.18 and highter + # conntrack is required for kubernetes 1.18 and higher - name: Install conntrack shell: bash run: | From 82eafa57d4d8c2d2f9addb20d18f4f66437d21c9 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Mon, 23 Mar 2020 17:25:25 -0700 Subject: [PATCH 186/668] Smoothly retry host startup --- pkg/minikube/node/start.go | 39 ++++++++++++++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 4 deletions(-) diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 204ed14b97..7e3e1836b0 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -320,14 +320,45 @@ func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command. } // startHost starts a new minikube host using a VM or None -func startHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, bool) { - host, exists, err := machine.StartHost(api, mc, n) - if err != nil { - exit.WithError("Unable to start VM. Please investigate and run 'minikube delete' if possible", err) +func startHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, bool) { + host, exists, err := machine.StartHost(api, cc, n) + if err == nil { + return host, exists } + out.T(out.Embarrassed, "StartHost failed, but will try again: {{.error}}", out.V{"error": err}) + + // NOTE: People get very cranky if you delete their prexisting VM. Only delete new ones. 
+ if !exists { + err := machine.DeleteHost(api, driver.MachineName(cc, n)) + if err != nil { + glog.Warningf("delete host: %v", err) + } + } + + // Try again, but just once to avoid copious error messages + time.Sleep(5 * time.Second) + + host, exists, err = machine.StartHost(api, cc, n) + if err == nil { + return host, exists + } + + out.T(out.FailureType, "StartHost failed again: {{.error}}", out.V{"error": err}) + out.T(out.Workaround, `Run: "{{.cmd}} delete", then "{{.cmd}} start --alsologtostderr -v=1" to try again with more logs`, + out.V{"cmd": minikubeCmd()}) + + exit.WithError("Unable to start VM after repeated tries. Please try {{'minikube delete' if possible", err) return host, exists } +// Return a minikube command containing the current profile name +func minikubeCmd() string { + if viper.GetString(config.ProfileName) != constants.DefaultClusterName { + return fmt.Sprintf("minikube -p %s", config.ProfileName) + } + return "minikube" +} + // validateNetwork tries to catch network problems as soon as possible func validateNetwork(h *host.Host, r command.Runner) string { ip, err := h.Driver.GetIP() From abf35cfbc15b52b1d5149f66d9224a6fa7d742a3 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Mon, 23 Mar 2020 17:41:06 -0700 Subject: [PATCH 187/668] Upgrade podman and add libglib2.0-0 to kicbase image The `TestOffline/group/crio` integration test was failing with the error: ``` crio load image: sudo podman load -i /var/lib/minikube/images/storage-provisioner_v1.8.1: exit status 125 ``` I tried SSH'ing into minikube and running the command myself, and got the error: ``` Error: could not get runtime: please update to v2.0.1 or later: outdated conmon version ``` I then tried updating conmon, but got the error: ``` conmon: error while loading shared libraries: libglib-2.0.so.0: cannot open shared object file: No such file or directory ``` I then tried adding the libglib2.0-0 library to the kic base image and upgrading podman, which seems to have resolved the 
issue. --- hack/images/kicbase.Dockerfile | 4 +++- pkg/drivers/kic/types.go | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/hack/images/kicbase.Dockerfile b/hack/images/kicbase.Dockerfile index baadf88df3..5afbe6e146 100644 --- a/hack/images/kicbase.Dockerfile +++ b/hack/images/kicbase.Dockerfile @@ -12,6 +12,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ docker.io=19.03.2-0ubuntu1 \ openssh-server=1:8.0p1-6build1 \ dnsutils=1:9.11.5.P4+dfsg-5.1ubuntu2.1 \ + # libglib2.0-0 is required for conmon, which is required for podman + libglib2.0-0=2.62.1-1 \ && rm /etc/crictl.yaml # install cri-o based on https://github.com/cri-o/cri-o/commit/96b0c34b31a9fc181e46d7d8e34fb8ee6c4dc4e1#diff-04c6e90faac2675aa89e2176d2eec7d8R128 RUN sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_19.10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list" && \ @@ -19,7 +21,7 @@ RUN sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/lib apt-key add - < Release.key && apt-get update && \ apt-get install -y --no-install-recommends cri-o-1.17=1.17.0-3 # install podman -RUN apt-get install -y --no-install-recommends podman=1.8.0~7 +RUN apt-get install -y --no-install-recommends podman=1.8.2~1 # disable non-docker runtimes by default RUN systemctl disable containerd && systemctl disable crio && rm /etc/crictl.yaml # enable docker which is default diff --git a/pkg/drivers/kic/types.go b/pkg/drivers/kic/types.go index 32e2950396..d31aac0b85 100644 --- a/pkg/drivers/kic/types.go +++ b/pkg/drivers/kic/types.go @@ -30,9 +30,9 @@ const ( DefaultPodCIDR = "10.244.0.0/16" // Version is the current version of kic - Version = "v0.0.7" + Version = "v0.0.8" // SHA of the kic base image - baseImageSHA = "a6f288de0e5863cdeab711fa6bafa38ee7d8d285ca14216ecf84fcfb07c7d176" + baseImageSHA = "2f3380ebf1bb0c75b0b47160fd4e61b7b8fef0f1f32f9def108d3eada50a7a81" // OverlayImage is the 
cni plugin used for overlay image, created by kind. // CNI plugin image used for kic drivers created by kind. From 4f3c4dee06de1d4871bd660c6496bc298f94914c Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 23 Mar 2020 17:48:26 -0700 Subject: [PATCH 188/668] add nice error message for missing conntrack --- pkg/minikube/problem/err_map.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pkg/minikube/problem/err_map.go b/pkg/minikube/problem/err_map.go index ec93a3569a..72547cf57f 100644 --- a/pkg/minikube/problem/err_map.go +++ b/pkg/minikube/problem/err_map.go @@ -179,6 +179,7 @@ var vmProblems = map[string]match{ GOOS: []string{"linux"}, Issues: []int{5950}, }, + // None "NONE_APISERVER_MISSING": { Regexp: re(`apiserver process never appeared`), @@ -225,6 +226,12 @@ var vmProblems = map[string]match{ Issues: []int{6083, 5636}, GOOS: []string{"linux"}, }, + "NONE_CONNTRACK": { + Regexp: re(`[ERROR FileExisting-conntrack]: conntrack not found in system path`), + Advice: "Kubernetes 1.18 and higher requires conntrack to be installed, to install it on your machine for the none driver simply run `apt-get install conntrack` as root.", + GOOS: []string{"linux"}, + }, + // VirtualBox "VBOX_BLOCKED": { Regexp: re(`NS_ERROR_FAILURE.*0x80004005`), From 091059b88619b6ff1c6764f309de66acb7dcee63 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Mon, 23 Mar 2020 17:55:00 -0700 Subject: [PATCH 189/668] Add timeout on needsTransfer function needsTransfer requires a network connection as it tries to get the digest of the image it is verifying. When running `TestOffline/group/crio`, LoadImages takes 6m30s to complete because it's waiting for i/o timeout. With this timeout, LoadImages takes ~45 seconds when running that integratin test. 
--- pkg/minikube/machine/cache_images.go | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/pkg/minikube/machine/cache_images.go b/pkg/minikube/machine/cache_images.go index 34b4379314..dd3fc58d9e 100644 --- a/pkg/minikube/machine/cache_images.go +++ b/pkg/minikube/machine/cache_images.go @@ -92,7 +92,11 @@ func LoadImages(cc *config.ClusterConfig, runner command.Runner, images []string for _, image := range images { image := image g.Go(func() error { - err := needsTransfer(imgClient, image, cr) + // Put a ten second limit on deciding if an image needs transfer + // because it takes much less than that time to just transfer the image. + // This is needed because if running in offline mode, we can spend minutes here + // waiting for i/o timeout. + err := timedNeedsTransfer(imgClient, image, cr, 10*time.Second) if err == nil { return nil } @@ -107,6 +111,28 @@ func LoadImages(cc *config.ClusterConfig, runner command.Runner, images []string return nil } +func timedNeedsTransfer(imgClient *client.Client, imgName string, cr cruntime.Manager, t time.Duration) error { + timeout := make(chan bool, 1) + go func() { + time.Sleep(t) + timeout <- true + }() + + transferFinished := make(chan bool, 1) + var err error + go func() { + err = needsTransfer(imgClient, imgName, cr) + transferFinished <- true + }() + + select { + case <-transferFinished: + return err + case <-timeout: + return fmt.Errorf("needs transfer timed out in %f seconds", t.Seconds()) + } +} + // needsTransfer returns an error if an image needs to be retransfered func needsTransfer(imgClient *client.Client, imgName string, cr cruntime.Manager) error { imgDgst := "" // for instance sha256:7c92a2c6bbcb6b6beff92d0a940779769c2477b807c202954c537e2e0deb9bed From 9e81e06e61fc29c5a765c5b19c9a13c01800d452 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 23 Mar 2020 18:04:29 -0700 Subject: [PATCH 190/668] fix regex --- go.sum | 2 -- pkg/minikube/problem/err_map.go | 2 +- 
2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/go.sum b/go.sum index 29d0ae43c8..70ea938169 100644 --- a/go.sum +++ b/go.sum @@ -178,8 +178,6 @@ github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= -github.com/docker/machine v0.7.1-0.20190902101342-b170508bf44c h1:/CqsDCJqQY+xBxJ65ri4ZGw6jXugauXisH3roam3Ics= -github.com/docker/machine v0.7.1-0.20190902101342-b170508bf44c/go.mod h1:I8mPNDeK1uH+JTcUU7X0ZW8KiYz0jyAgNaeSJ1rCfDI= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= diff --git a/pkg/minikube/problem/err_map.go b/pkg/minikube/problem/err_map.go index 72547cf57f..ad37cc2478 100644 --- a/pkg/minikube/problem/err_map.go +++ b/pkg/minikube/problem/err_map.go @@ -227,7 +227,7 @@ var vmProblems = map[string]match{ GOOS: []string{"linux"}, }, "NONE_CONNTRACK": { - Regexp: re(`[ERROR FileExisting-conntrack]: conntrack not found in system path`), + Regexp: re(`\[ERROR FileExisting-conntrack\]: conntrack not found in system path`), Advice: "Kubernetes 1.18 and higher requires conntrack to be installed, to install it on your machine for the none driver simply run `apt-get install conntrack` as root.", GOOS: []string{"linux"}, }, From 3c37556627b8932f76e3db46b6ebdae8dc0ed156 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Mon, 23 Mar 2020 18:06:53 -0700 Subject: [PATCH 191/668] update vm podman to same version --- 
deploy/iso/minikube-iso/package/podman/podman.hash | 1 + deploy/iso/minikube-iso/package/podman/podman.mk | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/deploy/iso/minikube-iso/package/podman/podman.hash b/deploy/iso/minikube-iso/package/podman/podman.hash index af242fcc80..fda1df9870 100644 --- a/deploy/iso/minikube-iso/package/podman/podman.hash +++ b/deploy/iso/minikube-iso/package/podman/podman.hash @@ -12,3 +12,4 @@ sha256 2e027c1b935f3a03f27ef7f17823ccf334607a17d033d4ce53a90b98294e7f68 v1.4.4.t sha256 61b44b739c485125f179044f7aa7dc58c820f771bce4ce495fa555a38dc68b57 v1.6.3.tar.gz sha256 6e59821320b435543bc7554e73faa66d5956e4ad3f7e7f4ea03bebd6726758e9 v1.6.4.tar.gz sha256 50960293c2019e38ce69e4cf5f0a683e7fea1562b180e38e38c9355fcd7c4f0d v1.6.5.tar.gz +sha256 69f7ff81da1510ebf2962c1de3170675ca3cd8a24bc00c93742a24bcce17c752 v1.8.2.tar.gz diff --git a/deploy/iso/minikube-iso/package/podman/podman.mk b/deploy/iso/minikube-iso/package/podman/podman.mk index 911dc30cf3..8781c0b0b9 100644 --- a/deploy/iso/minikube-iso/package/podman/podman.mk +++ b/deploy/iso/minikube-iso/package/podman/podman.mk @@ -1,5 +1,5 @@ -PODMAN_VERSION = v1.6.5 -PODMAN_COMMIT = 45e7be192ef99e870c59a1cd2c1fa7940b0af2d6 +PODMAN_VERSION = v1.8.2 +PODMAN_COMMIT = 028e3317eb1494b9b2acba4a0a295df80fae66cc PODMAN_SITE = https://github.com/containers/libpod/archive PODMAN_SOURCE = $(PODMAN_VERSION).tar.gz PODMAN_LICENSE = Apache-2.0 From 90de1d4c7f82212bd58899a0b2f24d751c7460aa Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Mon, 23 Mar 2020 18:37:13 -0700 Subject: [PATCH 192/668] install conntrack on integeration machines --- hack/jenkins/linux_integration_tests_none.sh | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/hack/jenkins/linux_integration_tests_none.sh b/hack/jenkins/linux_integration_tests_none.sh index a902ef15ee..9f45f2ad54 100755 --- a/hack/jenkins/linux_integration_tests_none.sh +++ b/hack/jenkins/linux_integration_tests_none.sh @@ -49,7 
+49,15 @@ sudo rm -rf /var/lib/minikube/* # Stop any leftover kubelet sudo systemctl is-active --quiet kubelet \ && echo "stopping kubelet" \ - && sudo systemctl stop kubelet + && sudo systemctl stop -f kubelet + + # conntrack is required for kubernetes 1.18 and higher for none driver +if ! conntrack --version &>/dev/null; then + echo "WARNING: No contrack is not installed" + sudo apt-get update -qq + sudo apt-get -qq -y install conntrack +fi + mkdir -p cron && gsutil -m rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES" sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP" From 8b587384912fd838baaafeccd799bdf08a29c5ca Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 24 Mar 2020 02:54:47 +0000 Subject: [PATCH 193/668] check for conntrack at start time, remove useless solution message --- cmd/minikube/cmd/start.go | 10 ++++++++++ pkg/minikube/problem/err_map.go | 5 ----- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index c349f5fa3f..67e9267830 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -834,6 +834,16 @@ func validateFlags(cmd *cobra.Command, drvName string) { if runtime != "docker" { out.WarningT("Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!", out.V{"runtime": runtime}) } + + conntrackK8sVersion, _ := semver.Make("1.18.0-rc.0") + currentK8sVersion, _ := semver.Make(strings.TrimPrefix(getKubernetesVersion(nil), version.VersionPrefix)) + if currentK8sVersion.GE(conntrackK8sVersion) { + err := exec.Command("conntrack").Run() + if err != nil { + exit.WithCodeT(exit.Config, "The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}", out.V{"k8sVersion": currentK8sVersion.String()}) + + } + } } // check that kubeadm extra args contain only whitelisted parameters diff --git 
a/pkg/minikube/problem/err_map.go b/pkg/minikube/problem/err_map.go index ad37cc2478..75a02575ca 100644 --- a/pkg/minikube/problem/err_map.go +++ b/pkg/minikube/problem/err_map.go @@ -226,11 +226,6 @@ var vmProblems = map[string]match{ Issues: []int{6083, 5636}, GOOS: []string{"linux"}, }, - "NONE_CONNTRACK": { - Regexp: re(`\[ERROR FileExisting-conntrack\]: conntrack not found in system path`), - Advice: "Kubernetes 1.18 and higher requires conntrack to be installed, to install it on your machine for the none driver simply run `apt-get install conntrack` as root.", - GOOS: []string{"linux"}, - }, // VirtualBox "VBOX_BLOCKED": { From 4ab849b604f945ace1d358e6a6597cbc2e246220 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 23 Mar 2020 20:18:03 -0700 Subject: [PATCH 194/668] shorter variable names --- cmd/minikube/cmd/start.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 67e9267830..74a6fcd134 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -835,12 +835,12 @@ func validateFlags(cmd *cobra.Command, drvName string) { out.WarningT("Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!", out.V{"runtime": runtime}) } - conntrackK8sVersion, _ := semver.Make("1.18.0-rc.0") - currentK8sVersion, _ := semver.Make(strings.TrimPrefix(getKubernetesVersion(nil), version.VersionPrefix)) - if currentK8sVersion.GE(conntrackK8sVersion) { + connVer, _ := semver.Make("1.18.0-rc.0") + k8sVersion, _ := semver.Make(strings.TrimPrefix(getKubernetesVersion(nil), version.VersionPrefix)) + if k8sVersion.GE(connVer) { err := exec.Command("conntrack").Run() if err != nil { - exit.WithCodeT(exit.Config, "The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}", out.V{"k8sVersion": currentK8sVersion.String()}) + exit.WithCodeT(exit.Config, "The none driver requires conntrack to be installed for kubernetes 
version {{.k8sVersion}}", out.V{"k8sVersion": k8sVersion.String()}) } } From ca3f88a73416feb90a30d0a9338e8ad0939ee3cd Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 23 Mar 2020 20:19:07 -0700 Subject: [PATCH 195/668] comment --- cmd/minikube/cmd/start.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 74a6fcd134..5710797933 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -835,6 +835,7 @@ func validateFlags(cmd *cobra.Command, drvName string) { out.WarningT("Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!", out.V{"runtime": runtime}) } + // conntrack is required starting with kubernetes 1.18, include the release candidates for completion connVer, _ := semver.Make("1.18.0-rc.0") k8sVersion, _ := semver.Make(strings.TrimPrefix(getKubernetesVersion(nil), version.VersionPrefix)) if k8sVersion.GE(connVer) { From 8489c074fe9746b60466fb89ffab641e562e5f2f Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 23 Mar 2020 20:46:51 -0700 Subject: [PATCH 196/668] code cleanup --- cmd/minikube/cmd/start.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 5710797933..22e3cae81e 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -60,6 +60,7 @@ import ( "k8s.io/minikube/pkg/minikube/proxy" "k8s.io/minikube/pkg/minikube/registry" "k8s.io/minikube/pkg/minikube/translate" + "k8s.io/minikube/pkg/util" pkgutil "k8s.io/minikube/pkg/util" "k8s.io/minikube/pkg/version" ) @@ -836,12 +837,11 @@ func validateFlags(cmd *cobra.Command, drvName string) { } // conntrack is required starting with kubernetes 1.18, include the release candidates for completion - connVer, _ := semver.Make("1.18.0-rc.0") - k8sVersion, _ := semver.Make(strings.TrimPrefix(getKubernetesVersion(nil), version.VersionPrefix)) - if k8sVersion.GE(connVer) { + version, _ := 
util.ParseKubernetesVersion(getKubernetesVersion(nil)) + if version.GTE(semver.MustParse("1.18.0-beta.1")) { err := exec.Command("conntrack").Run() if err != nil { - exit.WithCodeT(exit.Config, "The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}", out.V{"k8sVersion": k8sVersion.String()}) + exit.WithCodeT(exit.Config, "The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}", out.V{"k8sVersion": version.String()}) } } From beec3b1ce133cc4de8c32d488a7e853d90b4bfae Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Mon, 23 Mar 2020 13:34:36 -0700 Subject: [PATCH 197/668] stop kubelet on docker stop --- pkg/drivers/common.go | 32 ++++++++++++++++++++++++++++++++ pkg/drivers/kic/kic.go | 6 ++++++ pkg/drivers/none/none.go | 37 ++++--------------------------------- 3 files changed, 42 insertions(+), 33 deletions(-) diff --git a/pkg/drivers/common.go b/pkg/drivers/common.go index 2fde2efde8..20d3f537a9 100644 --- a/pkg/drivers/common.go +++ b/pkg/drivers/common.go @@ -17,11 +17,15 @@ limitations under the License. 
package drivers import ( + "fmt" "io" "io/ioutil" "os" + "os/exec" "path/filepath" + "strings" "syscall" + "time" "github.com/docker/machine/libmachine/drivers" "github.com/docker/machine/libmachine/mcnflag" @@ -29,6 +33,8 @@ import ( "github.com/docker/machine/libmachine/ssh" "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube/command" + "k8s.io/minikube/pkg/util/retry" ) // This file is for common code shared among internal machine drivers @@ -139,3 +145,29 @@ func fixMachinePermissions(path string) error { } return nil } + +// StopKubelet idempotently stops the kubelet +func StopKubelet(cr command.Runner) error { + glog.Infof("stopping kubelet.service ...") + stop := func() error { + cmd := exec.Command("sudo", "systemctl", "stop", "-f", "kubelet.service") + if rr, err := cr.RunCmd(cmd); err != nil { + glog.Errorf("temporary error for %q : %v", rr.Command(), err) + } + cmd = exec.Command("sudo", "systemctl", "show", "-p", "SubState", "kubelet") + rr, err := cr.RunCmd(cmd) + if err != nil { + glog.Errorf("temporary error: for %q : %v", rr.Command(), err) + } + if !strings.Contains(rr.Stdout.String(), "dead") && !strings.Contains(rr.Stdout.String(), "failed") { + return fmt.Errorf("unexpected kubelet state: %q", rr.Stdout.String()) + } + return nil + } + + if err := retry.Expo(stop, 2*time.Second, time.Minute*3, 5); err != nil { + return errors.Wrapf(err, "error stopping kubelet") + } + + return nil +} diff --git a/pkg/drivers/kic/kic.go b/pkg/drivers/kic/kic.go index 2488ca8e86..ea50fe3ad2 100644 --- a/pkg/drivers/kic/kic.go +++ b/pkg/drivers/kic/kic.go @@ -312,6 +312,12 @@ func (d *Driver) Start() error { // Stop a host gracefully, including any containers that we are managing. func (d *Driver) Stop() error { + // docker does not send right SIG for systemd to know to stop the systemd. + // to avoid bind adress be taken on an upgrade. 
more info https://github.com/kubernetes/minikube/issues/7171 + if err := pkgdrivers.StopKubelet(d.exec); err != nil { + glog.Warning("couldn't stop kubelet %v", err) + } + cmd := exec.Command(d.NodeConfig.OCIBinary, "stop", d.MachineName) if err := cmd.Run(); err != nil { return errors.Wrapf(err, "stopping %s", d.MachineName) diff --git a/pkg/drivers/none/none.go b/pkg/drivers/none/none.go index 2fd3565741..f05f907138 100644 --- a/pkg/drivers/none/none.go +++ b/pkg/drivers/none/none.go @@ -20,8 +20,6 @@ import ( "fmt" "net" "os/exec" - "strings" - "time" "github.com/docker/machine/libmachine/drivers" "github.com/docker/machine/libmachine/state" @@ -35,7 +33,6 @@ import ( "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/vmpath" - "k8s.io/minikube/pkg/util/retry" ) // cleanupPaths are paths to be removed by cleanup, and are used by both kubeadm and minikube. @@ -156,8 +153,8 @@ func (d *Driver) GetState() (state.State, error) { // Kill stops a host forcefully, including any containers that we are managing. func (d *Driver) Kill() error { - if err := stopKubelet(d.exec); err != nil { - return errors.Wrap(err, "kubelet") + if err := pkgdrivers.StopKubelet(d.exec); err != nil { + glog.Warning("couldn't stop kubelet %v. will continue with kill anyways.", err) } // First try to gracefully stop containers @@ -220,8 +217,8 @@ func (d *Driver) Start() error { // Stop a host gracefully, including any containers that we are managing. func (d *Driver) Stop() error { - if err := stopKubelet(d.exec); err != nil { - return errors.Wrap(err, "stop kubelet") + if err := pkgdrivers.StopKubelet(d.exec); err != nil { + glog.Warning("couldn't stop kubelet %v. 
will continue with stop anyways.", err) } containers, err := d.runtime.ListContainers(cruntime.ListOptions{}) if err != nil { @@ -241,32 +238,6 @@ func (d *Driver) RunSSHCommandFromDriver() error { return fmt.Errorf("driver does not support ssh commands") } -// stopKubelet idempotently stops the kubelet -func stopKubelet(cr command.Runner) error { - glog.Infof("stopping kubelet.service ...") - stop := func() error { - cmd := exec.Command("sudo", "systemctl", "stop", "kubelet.service") - if rr, err := cr.RunCmd(cmd); err != nil { - glog.Errorf("temporary error for %q : %v", rr.Command(), err) - } - cmd = exec.Command("sudo", "systemctl", "show", "-p", "SubState", "kubelet") - rr, err := cr.RunCmd(cmd) - if err != nil { - glog.Errorf("temporary error: for %q : %v", rr.Command(), err) - } - if !strings.Contains(rr.Stdout.String(), "dead") && !strings.Contains(rr.Stdout.String(), "failed") { - return fmt.Errorf("unexpected kubelet state: %q", rr.Stdout.String()) - } - return nil - } - - if err := retry.Expo(stop, 2*time.Second, time.Minute*3, 5); err != nil { - return errors.Wrapf(err, "error stopping kubelet") - } - - return nil -} - // restartKubelet restarts the kubelet func restartKubelet(cr command.Runner) error { glog.Infof("restarting kubelet.service ...") From 298be78e5f9fdbc300493f7382361e69a9418cd8 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Mon, 23 Mar 2020 13:41:15 -0700 Subject: [PATCH 198/668] remove duplicate code --- pkg/drivers/common.go | 32 -------------------------------- pkg/drivers/kic/kic.go | 7 ++++--- pkg/drivers/none/none.go | 9 +++++---- pkg/minikube/kubelet/kubelet.go | 2 +- 4 files changed, 10 insertions(+), 40 deletions(-) diff --git a/pkg/drivers/common.go b/pkg/drivers/common.go index 20d3f537a9..2fde2efde8 100644 --- a/pkg/drivers/common.go +++ b/pkg/drivers/common.go @@ -17,15 +17,11 @@ limitations under the License. 
package drivers import ( - "fmt" "io" "io/ioutil" "os" - "os/exec" "path/filepath" - "strings" "syscall" - "time" "github.com/docker/machine/libmachine/drivers" "github.com/docker/machine/libmachine/mcnflag" @@ -33,8 +29,6 @@ import ( "github.com/docker/machine/libmachine/ssh" "github.com/golang/glog" "github.com/pkg/errors" - "k8s.io/minikube/pkg/minikube/command" - "k8s.io/minikube/pkg/util/retry" ) // This file is for common code shared among internal machine drivers @@ -145,29 +139,3 @@ func fixMachinePermissions(path string) error { } return nil } - -// StopKubelet idempotently stops the kubelet -func StopKubelet(cr command.Runner) error { - glog.Infof("stopping kubelet.service ...") - stop := func() error { - cmd := exec.Command("sudo", "systemctl", "stop", "-f", "kubelet.service") - if rr, err := cr.RunCmd(cmd); err != nil { - glog.Errorf("temporary error for %q : %v", rr.Command(), err) - } - cmd = exec.Command("sudo", "systemctl", "show", "-p", "SubState", "kubelet") - rr, err := cr.RunCmd(cmd) - if err != nil { - glog.Errorf("temporary error: for %q : %v", rr.Command(), err) - } - if !strings.Contains(rr.Stdout.String(), "dead") && !strings.Contains(rr.Stdout.String(), "failed") { - return fmt.Errorf("unexpected kubelet state: %q", rr.Stdout.String()) - } - return nil - } - - if err := retry.Expo(stop, 2*time.Second, time.Minute*3, 5); err != nil { - return errors.Wrapf(err, "error stopping kubelet") - } - - return nil -} diff --git a/pkg/drivers/kic/kic.go b/pkg/drivers/kic/kic.go index ea50fe3ad2..d43fdd33c4 100644 --- a/pkg/drivers/kic/kic.go +++ b/pkg/drivers/kic/kic.go @@ -37,6 +37,7 @@ import ( "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/download" + "k8s.io/minikube/pkg/minikube/kubelet" ) // Driver represents a kic driver https://minikube.sigs.k8s.io/docs/reference/drivers/docker @@ -313,9 +314,9 @@ func (d *Driver) Start() error { // Stop a host gracefully, including any containers 
that we are managing. func (d *Driver) Stop() error { // docker does not send right SIG for systemd to know to stop the systemd. - // to avoid bind adress be taken on an upgrade. more info https://github.com/kubernetes/minikube/issues/7171 - if err := pkgdrivers.StopKubelet(d.exec); err != nil { - glog.Warning("couldn't stop kubelet %v", err) + // to avoid bind address be taken on an upgrade. more info https://github.com/kubernetes/minikube/issues/7171 + if err := kubelet.Stop(d.exec); err != nil { + glog.Warningf("couldn't stop kubelet. will continue with stop anyways: %v", err) } cmd := exec.Command(d.NodeConfig.OCIBinary, "stop", d.MachineName) diff --git a/pkg/drivers/none/none.go b/pkg/drivers/none/none.go index f05f907138..e772282a04 100644 --- a/pkg/drivers/none/none.go +++ b/pkg/drivers/none/none.go @@ -32,6 +32,7 @@ import ( "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/kubeconfig" + "k8s.io/minikube/pkg/minikube/kubelet" "k8s.io/minikube/pkg/minikube/vmpath" ) @@ -153,8 +154,8 @@ func (d *Driver) GetState() (state.State, error) { // Kill stops a host forcefully, including any containers that we are managing. func (d *Driver) Kill() error { - if err := pkgdrivers.StopKubelet(d.exec); err != nil { - glog.Warning("couldn't stop kubelet %v. will continue with kill anyways.", err) + if err := kubelet.Stop(d.exec); err != nil { + glog.Warningf("couldn't stop kubelet. will continue with kill anyways: %v", err) } // First try to gracefully stop containers @@ -217,8 +218,8 @@ func (d *Driver) Start() error { // Stop a host gracefully, including any containers that we are managing. func (d *Driver) Stop() error { - if err := pkgdrivers.StopKubelet(d.exec); err != nil { - glog.Warning("couldn't stop kubelet %v. will continue with stop anyways.", err) + if err := kubelet.Stop(d.exec); err != nil { + glog.Warningf("couldn't stop kubelet. 
will continue with stop anyways: %v", err) } containers, err := d.runtime.ListContainers(cruntime.ListOptions{}) if err != nil { diff --git a/pkg/minikube/kubelet/kubelet.go b/pkg/minikube/kubelet/kubelet.go index 2adc132681..4df9148636 100644 --- a/pkg/minikube/kubelet/kubelet.go +++ b/pkg/minikube/kubelet/kubelet.go @@ -32,7 +32,7 @@ import ( func Stop(cr command.Runner) error { glog.Infof("stopping kubelet ...") stop := func() error { - cmd := exec.Command("sudo", "systemctl", "stop", "kubelet.service") + cmd := exec.Command("sudo", "systemctl", "stop", "-f", "kubelet.service") if rr, err := cr.RunCmd(cmd); err != nil { glog.Errorf("temporary error for %q : %v", rr.Command(), err) } From fae3e34319b8dd82ed58fef3ce91b01cc2faf93b Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Mon, 23 Mar 2020 14:07:47 -0700 Subject: [PATCH 199/668] add stop force stop --- pkg/drivers/kic/kic.go | 3 +++ pkg/drivers/none/none.go | 4 ++-- pkg/minikube/kubelet/kubelet.go | 15 ++++++++++++++- 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/pkg/drivers/kic/kic.go b/pkg/drivers/kic/kic.go index d43fdd33c4..5bf91e9710 100644 --- a/pkg/drivers/kic/kic.go +++ b/pkg/drivers/kic/kic.go @@ -247,6 +247,9 @@ func (d *Driver) GetState() (state.State, error) { // Kill stops a host forcefully, including any containers that we are managing. func (d *Driver) Kill() error { + if err := kubelet.ForceStop(d.exec); err != nil { + glog.Warningf("couldn't force stop kubelet. will continue with kill anyways: %v", err) + } cmd := exec.Command(d.NodeConfig.OCIBinary, "kill", d.MachineName) if err := cmd.Run(); err != nil { return errors.Wrapf(err, "killing kic node %s", d.MachineName) diff --git a/pkg/drivers/none/none.go b/pkg/drivers/none/none.go index e772282a04..6e655183a8 100644 --- a/pkg/drivers/none/none.go +++ b/pkg/drivers/none/none.go @@ -154,8 +154,8 @@ func (d *Driver) GetState() (state.State, error) { // Kill stops a host forcefully, including any containers that we are managing. 
func (d *Driver) Kill() error { - if err := kubelet.Stop(d.exec); err != nil { - glog.Warningf("couldn't stop kubelet. will continue with kill anyways: %v", err) + if err := kubelet.ForceStop(d.exec); err != nil { + glog.Warningf("couldn't force stop kubelet. will continue with kill anyways: %v", err) } // First try to gracefully stop containers diff --git a/pkg/minikube/kubelet/kubelet.go b/pkg/minikube/kubelet/kubelet.go index 4df9148636..dc6037f2b8 100644 --- a/pkg/minikube/kubelet/kubelet.go +++ b/pkg/minikube/kubelet/kubelet.go @@ -30,9 +30,22 @@ import ( // Stop idempotently stops the kubelet func Stop(cr command.Runner) error { + return stop(cr, false) +} + +// ForceStop idempotently force stops the kubelet +func ForceStop(cr command.Runner) error { + return stop(cr, true) +} + +// stop dempotently stops the kubelet +func stop(cr command.Runner, force bool) error { glog.Infof("stopping kubelet ...") stop := func() error { - cmd := exec.Command("sudo", "systemctl", "stop", "-f", "kubelet.service") + cmd := exec.Command("sudo", "systemctl", "stop", "kubelet.service") + if force { + cmd = exec.Command("sudo", "systemctl", "stop", "-f", "kubelet.service") + } if rr, err := cr.RunCmd(cmd); err != nil { glog.Errorf("temporary error for %q : %v", rr.Command(), err) } From 5f90a319bd79fcc5dff1c61dc735ddfe43f93dd3 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Mon, 23 Mar 2020 16:38:09 -0700 Subject: [PATCH 200/668] stop k8s in kic and none --- cmd/minikube/cmd/pause.go | 3 ++- cmd/minikube/cmd/unpause.go | 3 ++- pkg/drivers/kic/kic.go | 22 ++++++++++++++++++++++ pkg/drivers/none/none.go | 3 ++- pkg/minikube/cluster/pause.go | 8 -------- pkg/minikube/constants/constants.go | 8 ++++++++ 6 files changed, 36 insertions(+), 11 deletions(-) diff --git a/cmd/minikube/cmd/pause.go b/cmd/minikube/cmd/pause.go index 33ef1f9f04..d7b090ce83 100644 --- a/cmd/minikube/cmd/pause.go +++ b/cmd/minikube/cmd/pause.go @@ -24,6 +24,7 @@ import ( "github.com/spf13/viper" 
"k8s.io/minikube/pkg/minikube/cluster" + "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" @@ -84,6 +85,6 @@ func runPause(cmd *cobra.Command, args []string) { } func init() { - pauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", cluster.DefaultNamespaces, "namespaces to pause") + pauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", constants.DefaultNamespaces, "namespaces to pause") pauseCmd.Flags().BoolVarP(&allNamespaces, "all-namespaces", "A", false, "If set, pause all namespaces") } diff --git a/cmd/minikube/cmd/unpause.go b/cmd/minikube/cmd/unpause.go index d27801fc23..e7295b74cd 100644 --- a/cmd/minikube/cmd/unpause.go +++ b/cmd/minikube/cmd/unpause.go @@ -24,6 +24,7 @@ import ( "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/cluster" + "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" @@ -82,6 +83,6 @@ var unpauseCmd = &cobra.Command{ } func init() { - unpauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", cluster.DefaultNamespaces, "namespaces to unpause") + unpauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", constants.DefaultNamespaces, "namespaces to unpause") unpauseCmd.Flags().BoolVarP(&allNamespaces, "all-namespaces", "A", false, "If set, unpause all namespaces") } diff --git a/pkg/drivers/kic/kic.go b/pkg/drivers/kic/kic.go index 5bf91e9710..78d2e45828 100644 --- a/pkg/drivers/kic/kic.go +++ b/pkg/drivers/kic/kic.go @@ -36,6 +36,7 @@ import ( "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/kubelet" ) @@ -320,6 +321,27 @@ func (d *Driver) Stop() error { // to avoid bind address be taken on an upgrade. 
more info https://github.com/kubernetes/minikube/issues/7171 if err := kubelet.Stop(d.exec); err != nil { glog.Warningf("couldn't stop kubelet. will continue with stop anyways: %v", err) + if err := kubelet.ForceStop(d.exec); err != nil { + glog.Warningf("couldn't force stop kubelet. will continue with stop anyways: %v", err) + } + } + + runtime, err := cruntime.New(cruntime.Config{Type: d.NodeConfig.ContainerRuntime, Runner: d.exec}) + if err != nil { // won't return error because: + // even though we can't stop the cotainers inside, we still wanna stop the minikube container itself + glog.Errorf("unable to get container runtime: %v", err) + } else { + containers, err := runtime.ListContainers(cruntime.ListOptions{Namespaces: constants.DefaultNamespaces}) + if err != nil { + return errors.Wrap(err, "containers") + } + if len(containers) > 0 { + if err := runtime.StopContainers(containers); err != nil { + return errors.Wrap(err, "stop containers") + } + } + glog.Infof("successfully stopped kubernetes!") + } cmd := exec.Command(d.NodeConfig.OCIBinary, "stop", d.MachineName) diff --git a/pkg/drivers/none/none.go b/pkg/drivers/none/none.go index 6e655183a8..a0be67ff56 100644 --- a/pkg/drivers/none/none.go +++ b/pkg/drivers/none/none.go @@ -221,7 +221,8 @@ func (d *Driver) Stop() error { if err := kubelet.Stop(d.exec); err != nil { glog.Warningf("couldn't stop kubelet. 
will continue with stop anyways: %v", err) } - containers, err := d.runtime.ListContainers(cruntime.ListOptions{}) + containers, err := d.runtime.ListContainers(cruntime.ListOptions{Namespaces: constants.DefaultNamespaces}) + if err != nil { return errors.Wrap(err, "containers") } diff --git a/pkg/minikube/cluster/pause.go b/pkg/minikube/cluster/pause.go index 2f98cf6de3..d7661f1274 100644 --- a/pkg/minikube/cluster/pause.go +++ b/pkg/minikube/cluster/pause.go @@ -24,14 +24,6 @@ import ( "k8s.io/minikube/pkg/minikube/kubelet" ) -// DefaultNamespaces are namespaces used by minikube, including addons -var DefaultNamespaces = []string{ - "kube-system", - "kubernetes-dashboard", - "storage-gluster", - "istio-operator", -} - // Pause pauses a Kubernetes cluster func Pause(cr cruntime.Manager, r command.Runner, namespaces []string) ([]string, error) { ids := []string{} diff --git a/pkg/minikube/constants/constants.go b/pkg/minikube/constants/constants.go index ecaa8cdb98..33ae576f48 100644 --- a/pkg/minikube/constants/constants.go +++ b/pkg/minikube/constants/constants.go @@ -92,4 +92,12 @@ var ( KubernetesReleaseBinaries = []string{"kubelet", "kubeadm", "kubectl"} // ImageCacheDir is the path to the image cache directory ImageCacheDir = localpath.MakeMiniPath("cache", "images") + + // DefaultNamespaces are kubernetes namespaces used by minikube, including addons + DefaultNamespaces = []string{ + "kube-system", + "kubernetes-dashboard", + "storage-gluster", + "istio-operator", + } ) From 47c8e9c87b69aba17b44ac190e614a34c46a4368 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Mon, 23 Mar 2020 16:42:55 -0700 Subject: [PATCH 201/668] dont return on error kic sto --- pkg/drivers/kic/kic.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/drivers/kic/kic.go b/pkg/drivers/kic/kic.go index 78d2e45828..c1fa7535c8 100644 --- a/pkg/drivers/kic/kic.go +++ b/pkg/drivers/kic/kic.go @@ -333,11 +333,11 @@ func (d *Driver) Stop() error { } else { containers, err 
:= runtime.ListContainers(cruntime.ListOptions{Namespaces: constants.DefaultNamespaces}) if err != nil { - return errors.Wrap(err, "containers") + glog.Errorf("unable list containers : %v", err) } if len(containers) > 0 { if err := runtime.StopContainers(containers); err != nil { - return errors.Wrap(err, "stop containers") + glog.Errorf("unable to stop containers : %v", err) } } glog.Infof("successfully stopped kubernetes!") From 545348341a6b698e95fbfc10ca147af35a71ced5 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Mon, 23 Mar 2020 16:44:33 -0700 Subject: [PATCH 202/668] for none let it stop all containers --- pkg/drivers/none/none.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/drivers/none/none.go b/pkg/drivers/none/none.go index a0be67ff56..d42d72c95a 100644 --- a/pkg/drivers/none/none.go +++ b/pkg/drivers/none/none.go @@ -221,7 +221,7 @@ func (d *Driver) Stop() error { if err := kubelet.Stop(d.exec); err != nil { glog.Warningf("couldn't stop kubelet. will continue with stop anyways: %v", err) } - containers, err := d.runtime.ListContainers(cruntime.ListOptions{Namespaces: constants.DefaultNamespaces}) + containers, err := d.runtime.ListContainers(cruntime.ListOptions{}) if err != nil { return errors.Wrap(err, "containers") From 36f3f657af8da5aaf1fe9dbe725e8f58fd4a79f6 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Mon, 23 Mar 2020 16:51:18 -0700 Subject: [PATCH 203/668] force stop if cant stop for none --- pkg/drivers/none/none.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/drivers/none/none.go b/pkg/drivers/none/none.go index d42d72c95a..566275f6aa 100644 --- a/pkg/drivers/none/none.go +++ b/pkg/drivers/none/none.go @@ -220,6 +220,9 @@ func (d *Driver) Start() error { func (d *Driver) Stop() error { if err := kubelet.Stop(d.exec); err != nil { glog.Warningf("couldn't stop kubelet. will continue with stop anyways: %v", err) + if err := kubelet.ForceStop(d.exec); err != nil { + glog.Warningf("couldn't force stop kubelet. 
will continue with stop anyways: %v", err) + } } containers, err := d.runtime.ListContainers(cruntime.ListOptions{}) From 7619a5075ae85f50ef71ae12b2d9e0c7e972b5d1 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Mon, 23 Mar 2020 17:43:21 -0700 Subject: [PATCH 204/668] fix logging --- pkg/drivers/kic/kic.go | 2 +- pkg/minikube/kubelet/kubelet.go | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/pkg/drivers/kic/kic.go b/pkg/drivers/kic/kic.go index c1fa7535c8..bdeefc0665 100644 --- a/pkg/drivers/kic/kic.go +++ b/pkg/drivers/kic/kic.go @@ -333,7 +333,7 @@ func (d *Driver) Stop() error { } else { containers, err := runtime.ListContainers(cruntime.ListOptions{Namespaces: constants.DefaultNamespaces}) if err != nil { - glog.Errorf("unable list containers : %v", err) + glog.Infof("unable list containers : %v", err) } if len(containers) > 0 { if err := runtime.StopContainers(containers); err != nil { diff --git a/pkg/minikube/kubelet/kubelet.go b/pkg/minikube/kubelet/kubelet.go index dc6037f2b8..8f15e6aca0 100644 --- a/pkg/minikube/kubelet/kubelet.go +++ b/pkg/minikube/kubelet/kubelet.go @@ -41,18 +41,18 @@ func ForceStop(cr command.Runner) error { // stop dempotently stops the kubelet func stop(cr command.Runner, force bool) error { glog.Infof("stopping kubelet ...") - stop := func() error { + stp := func() error { cmd := exec.Command("sudo", "systemctl", "stop", "kubelet.service") if force { cmd = exec.Command("sudo", "systemctl", "stop", "-f", "kubelet.service") } if rr, err := cr.RunCmd(cmd); err != nil { - glog.Errorf("temporary error for %q : %v", rr.Command(), err) + return fmt.Errorf("temporary error for %q : %v", rr.Command(), err) } cmd = exec.Command("sudo", "systemctl", "show", "-p", "SubState", "kubelet") rr, err := cr.RunCmd(cmd) if err != nil { - glog.Errorf("temporary error: for %q : %v", rr.Command(), err) + return fmt.Errorf("temporary error: for %q : %v", rr.Command(), err) } if !strings.Contains(rr.Stdout.String(), "dead") && 
!strings.Contains(rr.Stdout.String(), "failed") { return fmt.Errorf("unexpected kubelet state: %q", rr.Stdout.String()) @@ -60,10 +60,9 @@ func stop(cr command.Runner, force bool) error { return nil } - if err := retry.Expo(stop, 2*time.Second, time.Minute*3, 5); err != nil { + if err := retry.Expo(stp, 1*time.Second, time.Minute, 2); err != nil { return errors.Wrapf(err, "error stopping kubelet") } - return nil } From aec6fdc25aa793ffb7c08f6dc8d4a432d0b962d4 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Mon, 23 Mar 2020 20:39:21 -0700 Subject: [PATCH 205/668] initialize kic runner in stop --- pkg/drivers/kic/kic.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/drivers/kic/kic.go b/pkg/drivers/kic/kic.go index bdeefc0665..2055f91931 100644 --- a/pkg/drivers/kic/kic.go +++ b/pkg/drivers/kic/kic.go @@ -248,6 +248,8 @@ func (d *Driver) GetState() (state.State, error) { // Kill stops a host forcefully, including any containers that we are managing. func (d *Driver) Kill() error { + // on init this doesn't get filled when called from cmd + d.exec = command.NewKICRunner(d.MachineName, d.OCIBinary) if err := kubelet.ForceStop(d.exec); err != nil { glog.Warningf("couldn't force stop kubelet. will continue with kill anyways: %v", err) } @@ -317,6 +319,8 @@ func (d *Driver) Start() error { // Stop a host gracefully, including any containers that we are managing. func (d *Driver) Stop() error { + // on init this doesn't get filled when called from cmd + d.exec = command.NewKICRunner(d.MachineName, d.OCIBinary) // docker does not send right SIG for systemd to know to stop the systemd. // to avoid bind address be taken on an upgrade. 
more info https://github.com/kubernetes/minikube/issues/7171 if err := kubelet.Stop(d.exec); err != nil { From 67bd258465d5c8b40737d651c8f9f468f58101a6 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Mon, 23 Mar 2020 23:02:27 -0700 Subject: [PATCH 206/668] remove extra line --- pkg/drivers/none/none.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/drivers/none/none.go b/pkg/drivers/none/none.go index 566275f6aa..fe5498c91a 100644 --- a/pkg/drivers/none/none.go +++ b/pkg/drivers/none/none.go @@ -225,7 +225,6 @@ func (d *Driver) Stop() error { } } containers, err := d.runtime.ListContainers(cruntime.ListOptions{}) - if err != nil { return errors.Wrap(err, "containers") } From 653fac46ec7d75320993b9b50c5e95127146a81f Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Tue, 24 Mar 2020 00:46:48 -0700 Subject: [PATCH 207/668] skip preload test for none --- test/integration/aaa_download_only_test.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/test/integration/aaa_download_only_test.go b/test/integration/aaa_download_only_test.go index 9640212800..7cd32410ab 100644 --- a/test/integration/aaa_download_only_test.go +++ b/test/integration/aaa_download_only_test.go @@ -71,14 +71,16 @@ func TestDownloadOnly(t *testing.T) { t.Errorf("%s failed: %v", args, err) } - if download.PreloadExists(v, r) { - // Just make sure the tarball path exists - if _, err := os.Stat(download.TarballPath(v)); err != nil { - t.Errorf("preloaded tarball path doesn't exist: %v", err) + // skip for none, as none driver does not have preload feature. 
+ if !NoneDriver() { + if download.PreloadExists(v, r) { + // Just make sure the tarball path exists + if _, err := os.Stat(download.TarballPath(v)); err != nil { + t.Errorf("preloaded tarball path doesn't exist: %v", err) + } + return } - return } - imgs, err := images.Kubeadm("", v) if err != nil { t.Errorf("kubeadm images: %v %+v", v, err) From 1d5ef29f2ed3eb8770df83b2eebadd658ddc0204 Mon Sep 17 00:00:00 2001 From: Medya Ghazizadeh Date: Tue, 24 Mar 2020 04:11:11 -0700 Subject: [PATCH 208/668] Adjust GitHub actions timeout multiplier --- .github/workflows/main.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index fda40cd7f1..37d1991a0d 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -113,7 +113,7 @@ jobs: chmod a+x e2e-* chmod a+x minikube-* START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.timeout=70m -test.v -timeout-multiplier=3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.timeout=80m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) @@ -186,7 +186,7 @@ jobs: chmod a+x e2e-* chmod a+x minikube-* START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.timeout=70m -test.v -timeout-multiplier=3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.timeout=70m -test.v -timeout-multiplier=1.5 
-binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) @@ -254,7 +254,7 @@ jobs: chmod a+x e2e-* chmod a+x minikube-* START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=70m -test.v -timeout-multiplier=3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=70m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) @@ -322,7 +322,7 @@ jobs: chmod a+x e2e-* chmod a+x minikube-* START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=70m -test.v -timeout-multiplier=3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=70m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) @@ -400,7 +400,7 @@ jobs: chmod a+x e2e-* chmod a+x minikube-* START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=podman -test.timeout=70m -test.v -timeout-multiplier=3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=podman -test.timeout=70m -test.v 
-timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) From 407637b4a2070122229e91cf7930e2c18d6b08bc Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Tue, 24 Mar 2020 06:07:51 -0700 Subject: [PATCH 209/668] download only for both docker and podman --- .github/workflows/main.yml | 20 +++++++++++++++----- test/integration/aaa_download_only_test.go | 22 ++++++---------------- test/integration/functional_test.go | 2 +- test/integration/main.go | 5 +++++ 4 files changed, 27 insertions(+), 22 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 37d1991a0d..79a9f0982e 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -113,7 +113,7 @@ jobs: chmod a+x e2e-* chmod a+x minikube-* START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.timeout=80m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.timeout=80m -test.v -timeout-multiplier=1.3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) @@ -145,6 +145,8 @@ jobs: echo "----------------${numFail} Failures----------------------------" echo $STAT | jq '.FailedTests' || true echo "-------------------------------------------------------" + numPass=$(echo $STAT | jq '.NumberOfPass') + echo "*** $numPass Passed ***" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi docker_ubuntu_18_04: runs-on: ubuntu-18.04 @@ -186,7 +188,7 @@ jobs: chmod a+x e2e-* chmod a+x minikube-* START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig 
MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.timeout=70m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.timeout=80m -test.v -timeout-multiplier=1.3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) @@ -218,6 +220,8 @@ jobs: echo "----------------${numFail} Failures----------------------------" echo $STAT | jq '.FailedTests' || true echo "-------------------------------------------------------" + numPass=$(echo $STAT | jq '.NumberOfPass') + echo "*** $numPass Passed ***" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi none_ubuntu16_04: needs: [build_minikube] @@ -254,7 +258,7 @@ jobs: chmod a+x e2e-* chmod a+x minikube-* START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=70m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=50m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) @@ -286,6 +290,8 @@ jobs: echo "----------------${numFail} Failures----------------------------" echo $STAT | jq '.FailedTests' || true echo "-------------------------------------------------------" + numPass=$(echo $STAT | jq '.NumberOfPass') + echo "*** $numPass Passed ***" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi none_ubuntu18_04: needs: [build_minikube] @@ -322,7 +328,7 @@ 
jobs: chmod a+x e2e-* chmod a+x minikube-* START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=70m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=50m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) @@ -354,6 +360,8 @@ jobs: echo "----------------${numFail} Failures----------------------------" echo $STAT | jq '.FailedTests' || true echo "-------------------------------------------------------" + numPass=$(echo $STAT | jq '.NumberOfPass') + echo "*** $numPass Passed ***" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi podman_ubuntu_18_04: needs: [build_minikube] @@ -400,7 +408,7 @@ jobs: chmod a+x e2e-* chmod a+x minikube-* START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=podman -test.timeout=70m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=podman -test.timeout=70m -test.v -timeout-multiplier=1.3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) @@ -432,6 +440,8 @@ jobs: echo "----------------${numFail} Failures----------------------------" echo $STAT | jq '.FailedTests' || true echo "-------------------------------------------------------" + numPass=$(echo $STAT | jq '.NumberOfPass') + echo "*** $numPass Passed ***" if [ 
"$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi # After all 4 integration tests finished # collect all the reports and upload diff --git a/test/integration/aaa_download_only_test.go b/test/integration/aaa_download_only_test.go index 7cd32410ab..8be373a5dd 100644 --- a/test/integration/aaa_download_only_test.go +++ b/test/integration/aaa_download_only_test.go @@ -29,7 +29,6 @@ import ( "runtime" "strings" "testing" - "time" "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/constants" @@ -147,16 +146,16 @@ func TestDownloadOnly(t *testing.T) { } } -func TestDownloadOnlyDocker(t *testing.T) { - if !runningDockerDriver(StartArgs()) { - t.Skip("this test only runs with the docker driver") +func TestDownloadOnlyKic(t *testing.T) { + if !KicDriver() { + t.Skip("skipping, only for docker or podman driver") } - profile := UniqueProfileName("download-docker") - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute) + ctx, cancel := context.WithTimeout(context.Background(), Minutes(15)) defer Cleanup(t, profile, cancel) - args := []string{"start", "--download-only", "-p", profile, "--force", "--alsologtostderr", "--driver=docker"} + args := []string{"start", "--download-only", "-p", profile, "--force", "--alsologtostderr"} + args = append(args, StartArgs()...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Errorf("%s failed: %v:\n%s", args, err, rr.Output()) @@ -178,12 +177,3 @@ func TestDownloadOnlyDocker(t *testing.T) { t.Errorf("checksum of %s does not match remote checksum (%s != %s)", tarball, string(remoteChecksum), string(checksum[:])) } } - -func runningDockerDriver(startArgs []string) bool { - for _, s := range startArgs { - if s == "--driver=docker" { - return true - } - } - return false -} diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index ec4ce69051..fc53eea6c0 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -725,7 +725,7 @@ func validateMySQL(ctx context.Context, t *testing.T, profile string) { rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", names[0], "--", "mysql", "-ppassword", "-e", "show databases;")) return err } - if err = retry.Expo(mysql, 5*time.Second, 180*time.Second); err != nil { + if err = retry.Expo(mysql, 5*time.Second, Seconds(180)); err != nil { t.Errorf("mysql failing: %v", err) } } diff --git a/test/integration/main.go b/test/integration/main.go index 04c22da305..b6c5a3916f 100644 --- a/test/integration/main.go +++ b/test/integration/main.go @@ -68,6 +68,11 @@ func HyperVDriver() bool { return strings.Contains(*startArgs, "--driver=hyperv") || strings.Contains(*startArgs, "--vm-driver=hyperv") } +// KicDriver returns whether or not this test is using the docker or podman driver +func KicDriver() bool { + return strings.Contains(*startArgs, "--driver=docker") || strings.Contains(*startArgs, "--vm-driver=docker") || strings.Contains(*startArgs, "--vm-driver=podman") || strings.Contains(*startArgs, "driver=podman") +} + // CanCleanup returns if cleanup is allowed func CanCleanup() bool { return *cleanup From 42980d609bc20c2790567296cab5a861055d2e33 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Tue, 24 Mar 2020 06:29:35 -0700 Subject: 
[PATCH 210/668] remove not needed retry --- test/integration/functional_test.go | 2 +- test/integration/version_upgrade_test.go | 12 +++--------- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index fc53eea6c0..5111590fe6 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -725,7 +725,7 @@ func validateMySQL(ctx context.Context, t *testing.T, profile string) { rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", names[0], "--", "mysql", "-ppassword", "-e", "show databases;")) return err } - if err = retry.Expo(mysql, 5*time.Second, Seconds(180)); err != nil { + if err = retry.Expo(mysql, 2*time.Second, Seconds(180)); err != nil { t.Errorf("mysql failing: %v", err) } } diff --git a/test/integration/version_upgrade_test.go b/test/integration/version_upgrade_test.go index e9c655a9b4..0c553b9998 100644 --- a/test/integration/version_upgrade_test.go +++ b/test/integration/version_upgrade_test.go @@ -76,7 +76,7 @@ func TestVersionUpgrade(t *testing.T) { } // Retry to allow flakiness for the previous release - if err := retry.Expo(r, 1*time.Second, Minutes(30), 3); err != nil { + if err := retry.Expo(r, 1*time.Second, Minutes(30), 2); err != nil { t.Fatalf("release start failed: %v", err) } @@ -120,14 +120,8 @@ func TestVersionUpgrade(t *testing.T) { } args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) 
- rr = &RunResult{} - r = func() error { - rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), args...)) - return err - } - - if err := retry.Expo(r, 1*time.Second, Minutes(30), 3); err == nil { - t.Fatalf("downgrading kubernetes should not be allowed: %v", err) + if rr, err := Run(t, exec.CommandContext(ctx, tf.Name(), args...)); err == nil { + t.Fatalf("downgrading kubernetes should not be allowed. expected to see error but got %v for %q", err, rr.Args()) } args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) From 0478f2d2048e7b82627351c9afe530156f0843b9 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Tue, 24 Mar 2020 06:37:16 -0700 Subject: [PATCH 211/668] adjuts the retry times --- test/integration/addons_test.go | 4 ++-- test/integration/fn_mount_cmd.go | 2 +- test/integration/fn_pvc.go | 2 +- test/integration/fn_tunnel_cmd.go | 2 +- test/integration/version_upgrade_test.go | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/test/integration/addons_test.go b/test/integration/addons_test.go index 313fa01d15..a784f381bf 100644 --- a/test/integration/addons_test.go +++ b/test/integration/addons_test.go @@ -129,7 +129,7 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) { return nil } - if err := retry.Expo(checkIngress, 500*time.Millisecond, Minutes(1)); err != nil { + if err := retry.Expo(checkIngress, 500*time.Millisecond, Seconds(90)); err != nil { t.Errorf("ingress never responded as expected on 127.0.0.1:80: %v", err) } @@ -241,7 +241,7 @@ func validateMetricsServerAddon(ctx context.Context, t *testing.T, profile strin } // metrics-server takes some time to be able to collect metrics - if err := retry.Expo(checkMetricsServer, Seconds(13), Minutes(6)); err != nil { + if err := retry.Expo(checkMetricsServer, time.Second*3, Minutes(6)); err != nil { t.Errorf(err.Error()) } diff --git 
a/test/integration/fn_mount_cmd.go b/test/integration/fn_mount_cmd.go index eedc9e7983..8a3a9f68ee 100644 --- a/test/integration/fn_mount_cmd.go +++ b/test/integration/fn_mount_cmd.go @@ -106,7 +106,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { } start := time.Now() - if err := retry.Expo(checkMount, time.Second, 15*time.Second); err != nil { + if err := retry.Expo(checkMount, time.Millisecond*500, Seconds(15)); err != nil { // For local testing, allow macOS users to click prompt. If they don't, skip the test. if runtime.GOOS == "darwin" { t.Skip("skipping: mount did not appear, likely because macOS requires prompt to allow non-codesigned binaries to listen on non-localhost port") diff --git a/test/integration/fn_pvc.go b/test/integration/fn_pvc.go index 4f97c830ef..785783fd80 100644 --- a/test/integration/fn_pvc.go +++ b/test/integration/fn_pvc.go @@ -57,7 +57,7 @@ func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile st } // Ensure the addon-manager has created the StorageClass before creating a claim, otherwise it won't be bound - if err := retry.Expo(checkStorageClass, time.Second, 90*time.Second); err != nil { + if err := retry.Expo(checkStorageClass, time.Millisecond*500, Seconds(100)); err != nil { t.Errorf("no default storage class after retry: %v", err) } diff --git a/test/integration/fn_tunnel_cmd.go b/test/integration/fn_tunnel_cmd.go index a21b9350c3..e4598b3da7 100644 --- a/test/integration/fn_tunnel_cmd.go +++ b/test/integration/fn_tunnel_cmd.go @@ -119,7 +119,7 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { } return nil } - if err = retry.Expo(fetch, time.Millisecond*500, Minutes(2), 6); err != nil { + if err = retry.Expo(fetch, time.Millisecond*500, Minutes(2), 13); err != nil { t.Errorf("failed to contact nginx at %s: %v", nginxIP, err) } diff --git a/test/integration/version_upgrade_test.go b/test/integration/version_upgrade_test.go index 
0c553b9998..a03676a335 100644 --- a/test/integration/version_upgrade_test.go +++ b/test/integration/version_upgrade_test.go @@ -75,7 +75,7 @@ func TestVersionUpgrade(t *testing.T) { return err } - // Retry to allow flakiness for the previous release + // Retry up to two times, to allow flakiness for the previous release if err := retry.Expo(r, 1*time.Second, Minutes(30), 2); err != nil { t.Fatalf("release start failed: %v", err) } @@ -121,7 +121,7 @@ func TestVersionUpgrade(t *testing.T) { args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) if rr, err := Run(t, exec.CommandContext(ctx, tf.Name(), args...)); err == nil { - t.Fatalf("downgrading kubernetes should not be allowed. expected to see error but got %v for %q", err, rr.Args()) + t.Fatalf("downgrading kubernetes should not be allowed. expected to see error but got %v for %q", err, rr.Args) } args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) 
From 83cc28e0826a8b01109af317485d38c9e5b629a3 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Tue, 24 Mar 2020 07:11:22 -0700 Subject: [PATCH 212/668] add logging for when container status is running --- pkg/drivers/kic/oci/oci.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/drivers/kic/oci/oci.go b/pkg/drivers/kic/oci/oci.go index df5117164f..2de446a4cd 100644 --- a/pkg/drivers/kic/oci/oci.go +++ b/pkg/drivers/kic/oci/oci.go @@ -156,6 +156,10 @@ func CreateContainerNode(p CreateParams) error { if s != "running" { return fmt.Errorf("temporary error created container %q is not running yet", p.Name) } + if s == "running" { + glog.Infof("the created container %q has a running status.", p.Name) + return nil + } return nil } From 41b16b1568d83e3fcc23645d701f5f41502d32fe Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Tue, 24 Mar 2020 07:27:07 -0700 Subject: [PATCH 213/668] fail if container is not running --- pkg/drivers/kic/oci/oci.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/drivers/kic/oci/oci.go b/pkg/drivers/kic/oci/oci.go index 2de446a4cd..b0904d586b 100644 --- a/pkg/drivers/kic/oci/oci.go +++ b/pkg/drivers/kic/oci/oci.go @@ -164,8 +164,8 @@ func CreateContainerNode(p CreateParams) error { } // retry up to up 5 seconds to make sure the created container status is running. 
- if err := retry.Expo(checkRunning, 13*time.Millisecond, time.Second*5); err != nil { - glog.Warningf("The created container %q failed to report to be running in 5 seconds.", p.Name) + if err := retry.Expo(checkRunning, 13*time.Millisecond, time.Second*10); err != nil { + return errors.Wrapf(err, "check container %q running", p.Name) } return nil From 940baa09eae4197074dad120475f54e3576d59b6 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Tue, 24 Mar 2020 07:31:22 -0700 Subject: [PATCH 214/668] fix comment --- pkg/drivers/kic/oci/oci.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/drivers/kic/oci/oci.go b/pkg/drivers/kic/oci/oci.go index b0904d586b..295925b8cf 100644 --- a/pkg/drivers/kic/oci/oci.go +++ b/pkg/drivers/kic/oci/oci.go @@ -163,8 +163,8 @@ func CreateContainerNode(p CreateParams) error { return nil } - // retry up to up 5 seconds to make sure the created container status is running. - if err := retry.Expo(checkRunning, 13*time.Millisecond, time.Second*10); err != nil { + // retry up to up 13 seconds to make sure the created container status is running. 
+ if err := retry.Expo(checkRunning, 13*time.Millisecond, time.Second*13); err != nil { return errors.Wrapf(err, "check container %q running", p.Name) } From fed6713a3fbd1568208bc96104c9e8cac243cb89 Mon Sep 17 00:00:00 2001 From: tstromberg Date: Tue, 24 Mar 2020 08:45:29 -0700 Subject: [PATCH 215/668] Enable HW_RANDOM_VIRTIO --- deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig b/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig index ebf694f191..40c43c77ce 100644 --- a/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig +++ b/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig @@ -380,6 +380,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y CONFIG_SERIAL_8250_DETECT_IRQ=y CONFIG_SERIAL_8250_RSA=y CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_VIRTIO=y # CONFIG_HW_RANDOM_INTEL is not set # CONFIG_HW_RANDOM_AMD is not set CONFIG_NVRAM=y From e9e9a41049fca2d5ad8d7f3d0cfbd9e2953b0ac8 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Tue, 24 Mar 2020 08:58:15 -0700 Subject: [PATCH 216/668] Use ExampleCmd now that it is available --- pkg/minikube/node/start.go | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 7e3e1836b0..5f7b131a6e 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -48,6 +48,7 @@ import ( "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/logs" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/proxy" "k8s.io/minikube/pkg/util" @@ -335,7 +336,7 @@ func startHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*hos } } - // Try again, but just once to avoid copious error messages + // Try again, but just once to avoid making the logs overly confusing time.Sleep(5 * 
time.Second) host, exists, err = machine.StartHost(api, cc, n) @@ -344,21 +345,13 @@ func startHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*hos } out.T(out.FailureType, "StartHost failed again: {{.error}}", out.V{"error": err}) - out.T(out.Workaround, `Run: "{{.cmd}} delete", then "{{.cmd}} start --alsologtostderr -v=1" to try again with more logs`, - out.V{"cmd": minikubeCmd()}) + out.T(out.Workaround, `Run: "{{.delete}}", then "{{.start}} --alsologtostderr -v=1" to try again with more logging`, + out.V{"delete": mustload.ExampleCmd(cc.Name, "delete"), "start": mustload.ExampleCmd(cc.Name, "start")}) exit.WithError("Unable to start VM after repeated tries. Please try {{'minikube delete' if possible", err) return host, exists } -// Return a minikube command containing the current profile name -func minikubeCmd() string { - if viper.GetString(config.ProfileName) != constants.DefaultClusterName { - return fmt.Sprintf("minikube -p %s", config.ProfileName) - } - return "minikube" -} - // validateNetwork tries to catch network problems as soon as possible func validateNetwork(h *host.Host, r command.Runner) string { ip, err := h.Driver.GetIP() From b5d5aa1d52b2212986219515f6bec70d641f3b7f Mon Sep 17 00:00:00 2001 From: Vincent Link Date: Tue, 24 Mar 2020 16:46:00 +0100 Subject: [PATCH 217/668] Parse --disk-size and --memory sizes with binary suffixes --- pkg/util/utils.go | 5 +++-- pkg/util/utils_test.go | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/pkg/util/utils.go b/pkg/util/utils.go index fdd38f4f35..37de6085cd 100644 --- a/pkg/util/utils.go +++ b/pkg/util/utils.go @@ -38,12 +38,13 @@ func CalculateSizeInMB(humanReadableSize string) (int, error) { if err == nil { humanReadableSize += "mb" } - size, err := units.FromHumanSize(humanReadableSize) + // parse the size suffix binary instead of decimal so that 1G -> 1024MB instead of 1000MB + size, err := units.RAMInBytes(humanReadableSize) if err != nil { return 0, 
fmt.Errorf("FromHumanSize: %v", err) } - return int(size / units.MB), nil + return int(size / units.MiB), nil } // GetBinaryDownloadURL returns a suitable URL for the platform diff --git a/pkg/util/utils_test.go b/pkg/util/utils_test.go index f1fe867c48..55392d7ebc 100644 --- a/pkg/util/utils_test.go +++ b/pkg/util/utils_test.go @@ -51,6 +51,7 @@ func TestCalculateSizeInMB(t *testing.T) { {"1024KB", 1}, {"1024mb", 1024}, {"1024b", 0}, + {"1g", 1024}, } for _, tt := range testData { @@ -59,7 +60,7 @@ func TestCalculateSizeInMB(t *testing.T) { t.Fatalf("unexpected err: %v", err) } if number != tt.expectedNumber { - t.Fatalf("Expected '%d'' but got '%d'", tt.expectedNumber, number) + t.Fatalf("Expected '%d' but got '%d' from size '%s'", tt.expectedNumber, number, tt.size) } } } From f00f5ff6008c17652d77626192773927ccf349a3 Mon Sep 17 00:00:00 2001 From: Medya Ghazizadeh Date: Tue, 24 Mar 2020 10:21:32 -0700 Subject: [PATCH 218/668] remove extra if --- pkg/drivers/kic/oci/oci.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pkg/drivers/kic/oci/oci.go b/pkg/drivers/kic/oci/oci.go index 295925b8cf..5885b0e1cf 100644 --- a/pkg/drivers/kic/oci/oci.go +++ b/pkg/drivers/kic/oci/oci.go @@ -156,10 +156,7 @@ func CreateContainerNode(p CreateParams) error { if s != "running" { return fmt.Errorf("temporary error created container %q is not running yet", p.Name) } - if s == "running" { - glog.Infof("the created container %q has a running status.", p.Name) - return nil - } + glog.Infof("the created container %q has a running status.", p.Name) return nil } From 5ef83f35e222c8d4d829dc5776837ee2eccff3cc Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Tue, 24 Mar 2020 10:53:02 -0700 Subject: [PATCH 219/668] Add --preload flag to optionally turn off preload --- cmd/minikube/cmd/start.go | 2 ++ go.mod | 2 +- go.sum | 2 ++ pkg/minikube/download/preload.go | 5 +++++ test/integration/start_stop_delete_test.go | 2 +- 5 files changed, 11 insertions(+), 2 deletions(-) 
diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 01f27f68c4..7d6e92cbf6 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -123,6 +123,7 @@ const ( hostOnlyNicType = "host-only-nic-type" natNicType = "nat-nic-type" nodes = "nodes" + preload = "preload" ) var ( @@ -175,6 +176,7 @@ func initMinikubeFlags() { startCmd.Flags().Bool(autoUpdate, true, "If set, automatically updates drivers to the latest version. Defaults to true.") startCmd.Flags().Bool(installAddons, true, "If set, install addons. Defaults to true.") startCmd.Flags().IntP(nodes, "n", 1, "The number of nodes to spin up. Defaults to 1.") + startCmd.Flags().Bool(preload, true, "If true, download tarball of preloaded images if available to improve start time.") } // initKubernetesFlags inits the commandline flags for kubernetes related options diff --git a/go.mod b/go.mod index 3f4a02bf9f..29a5ac30d5 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/hooklift/iso9660 v0.0.0-20170318115843-1cf07e5970d8 github.com/imdario/mergo v0.3.8 // indirect github.com/intel-go/cpuid v0.0.0-20181003105527-1a4a6f06a1c6 // indirect - github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345 + github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d // indirect github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 // indirect diff --git a/go.sum b/go.sum index 70ea938169..d7d0bc3a61 100644 --- a/go.sum +++ b/go.sum @@ -423,6 +423,8 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345 h1:XP1VL9iOZu4yz/rq8zj+yvB23XEY5erXRzp8JYmkWu0= github.com/johanneswuerbach/nfsexports 
v0.0.0-20181204082207-1aa528dcb345/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU= +github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f h1:tL0xH80QVHQOde6Qqdohv6PewABH8l8N9pywZtuojJ0= +github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= diff --git a/pkg/minikube/download/preload.go b/pkg/minikube/download/preload.go index 09e0d8d35e..0291a15b93 100644 --- a/pkg/minikube/download/preload.go +++ b/pkg/minikube/download/preload.go @@ -31,6 +31,7 @@ import ( "github.com/golang/glog" "github.com/hashicorp/go-getter" "github.com/pkg/errors" + "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/out" ) @@ -76,6 +77,10 @@ func remoteTarballURL(k8sVersion string) string { // PreloadExists returns true if there is a preloaded tarball that can be used func PreloadExists(k8sVersion, containerRuntime string) bool { + if !viper.GetBool("preload") { + return false + } + if containerRuntime != "docker" { return false } diff --git a/test/integration/start_stop_delete_test.go b/test/integration/start_stop_delete_test.go index 38ca1d9b9a..6ba37042ef 100644 --- a/test/integration/start_stop_delete_test.go +++ b/test/integration/start_stop_delete_test.go @@ -175,7 +175,7 @@ func TestStartStopWithPreload(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), Minutes(40)) defer CleanupWithLogs(t, profile, cancel) - startArgs := []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true"} + startArgs := []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true", 
"--preload=false"} startArgs = append(startArgs, StartArgs()...) k8sVersion := "v1.17.0" startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", k8sVersion)) From 4a7f59f8a8c7da65b4a9ffd737250cf0020548a2 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Tue, 24 Mar 2020 10:59:25 -0700 Subject: [PATCH 220/668] remove preload flag --- test/integration/start_stop_delete_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/start_stop_delete_test.go b/test/integration/start_stop_delete_test.go index 6ba37042ef..38ca1d9b9a 100644 --- a/test/integration/start_stop_delete_test.go +++ b/test/integration/start_stop_delete_test.go @@ -175,7 +175,7 @@ func TestStartStopWithPreload(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), Minutes(40)) defer CleanupWithLogs(t, profile, cancel) - startArgs := []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true", "--preload=false"} + startArgs := []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true"} startArgs = append(startArgs, StartArgs()...) 
k8sVersion := "v1.17.0" startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", k8sVersion)) From 8d9e26d9940a7c2e89387661dd15b6e6d76cb217 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Tue, 24 Mar 2020 11:58:41 -0700 Subject: [PATCH 221/668] Test restart with preloaded tarball and without --- test/integration/start_stop_delete_test.go | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/test/integration/start_stop_delete_test.go b/test/integration/start_stop_delete_test.go index 38ca1d9b9a..f0aeb03450 100644 --- a/test/integration/start_stop_delete_test.go +++ b/test/integration/start_stop_delete_test.go @@ -175,7 +175,7 @@ func TestStartStopWithPreload(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), Minutes(40)) defer CleanupWithLogs(t, profile, cancel) - startArgs := []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true"} + startArgs := []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true", "--preload=false"} startArgs = append(startArgs, StartArgs()...) k8sVersion := "v1.17.0" startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", k8sVersion)) @@ -191,6 +191,18 @@ func TestStartStopWithPreload(t *testing.T) { if err != nil { t.Fatalf("%s failed: %v", rr.Args, err) } + + // Restart again with v1.17.0, this time with the preloaded tarball + startArgs = []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true"} + startArgs = append(startArgs, StartArgs()...) 
+ startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", k8sVersion)) + + rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) + if err != nil { + t.Fatalf("%s failed: %v", rr.Args, err) + } + verifyImageExistsInDaemon(ctx, t, profile, image) + // Restart minikube with v1.17.3, which has a preloaded tarball startArgs = []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true"} startArgs = append(startArgs, StartArgs()...) @@ -200,9 +212,13 @@ func TestStartStopWithPreload(t *testing.T) { if err != nil { t.Fatalf("%s failed: %v", rr.Args, err) } + verifyImageExistsInDaemon(ctx, t, profile, image) +} + +func verifyImageExistsInDaemon(ctx context.Context, t *testing.T, profile, image string) { // Ensure that busybox still exists in the daemon - rr, err = Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "--", "docker", "images")) + rr, err := Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "--", "docker", "images")) if err != nil { t.Fatalf("%s failed: %v", rr.Args, err) } From 0d76e2a68477c09dc6ba924d9504d972672063f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20F=20Bj=C3=B6rklund?= Date: Tue, 24 Mar 2020 20:19:01 +0100 Subject: [PATCH 222/668] Avoid dereferencing nil, in case of an error --- pkg/minikube/assets/vm_assets.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/minikube/assets/vm_assets.go b/pkg/minikube/assets/vm_assets.go index c3f30a667d..e3b4678544 100644 --- a/pkg/minikube/assets/vm_assets.go +++ b/pkg/minikube/assets/vm_assets.go @@ -120,7 +120,10 @@ func (f *FileAsset) GetLength() (flen int) { // GetModTime returns modification time of the file func (f *FileAsset) GetModTime() (time.Time, error) { fi, err := os.Stat(f.AssetName) - return fi.ModTime(), err + if err != nil { + return time.Time{}, err + } + return fi.ModTime(), nil } // Read reads the asset From 972ff3cc00099ec52cb15c0594c767d66560e842 Mon Sep 17 00:00:00 2001 
From: Thomas Stromberg Date: Tue, 24 Mar 2020 12:21:27 -0700 Subject: [PATCH 223/668] Update DefaultKubernetesVersion to v1.18.0 --- pkg/minikube/bootstrapper/bsutil/kubelet_test.go | 8 ++++---- pkg/minikube/constants/constants.go | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go index 052b9937dd..660b8e5b91 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go @@ -81,7 +81,7 @@ Wants=crio.service [Service] ExecStart= -ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m +ExecStart=/var/lib/minikube/binaries/v1.18.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m [Install] `, @@ -107,7 +107,7 @@ Wants=containerd.service [Service] ExecStart= -ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=Webhook 
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m +ExecStart=/var/lib/minikube/binaries/v1.18.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m [Install] `, @@ -140,7 +140,7 @@ Wants=containerd.service [Service] ExecStart= -ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m +ExecStart=/var/lib/minikube/binaries/v1.18.0/kubelet --authorization-mode=Webhook 
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m [Install] `, @@ -167,7 +167,7 @@ Wants=docker.socket [Service] ExecStart= -ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-infra-container-image=docker-proxy-image.io/google_containers/pause:3.2 --pod-manifest-path=/etc/kubernetes/manifests +ExecStart=/var/lib/minikube/binaries/v1.18.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-infra-container-image=docker-proxy-image.io/google_containers/pause:3.2 --pod-manifest-path=/etc/kubernetes/manifests [Install] `, diff --git a/pkg/minikube/constants/constants.go b/pkg/minikube/constants/constants.go index 33ae576f48..8b181152a7 100644 --- a/pkg/minikube/constants/constants.go +++ b/pkg/minikube/constants/constants.go @@ -26,9 +26,9 @@ import ( const ( // 
DefaultKubernetesVersion is the default kubernetes version - DefaultKubernetesVersion = "v1.18.0-rc.1" + DefaultKubernetesVersion = "v1.18.0" // NewestKubernetesVersion is the newest Kubernetes version to test against - NewestKubernetesVersion = "v1.18.0-rc.1" + NewestKubernetesVersion = "v1.18.0" // OldestKubernetesVersion is the oldest Kubernetes version to test against OldestKubernetesVersion = "v1.11.10" // DefaultClusterName is the default nane for the k8s cluster From e3bcdbf42ac35c685e3f658eec768d2efdff69b2 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 24 Mar 2020 12:55:31 -0700 Subject: [PATCH 224/668] fix validateSpecificDriver error message --- cmd/minikube/cmd/start.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 01f27f68c4..302bf72a0a 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -577,7 +577,7 @@ func validateSpecifiedDriver(existing *config.ClusterConfig) { out.ErrT(out.Workaround, `To proceed, either: -1) Delete the existing "{{.name}}" cluster using: '{{.command}} delete' +1) Delete the existing "{{.name}}" cluster using: 'minikube delete -p {{.name}}' * or * From 2933953cfa2c95b1d135d449b536c3d2e909c38c Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 24 Mar 2020 13:18:59 -0700 Subject: [PATCH 225/668] use mustload --- cmd/minikube/cmd/start.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 302bf72a0a..60d2cce40b 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -577,12 +577,12 @@ func validateSpecifiedDriver(existing *config.ClusterConfig) { out.ErrT(out.Workaround, `To proceed, either: -1) Delete the existing "{{.name}}" cluster using: 'minikube delete -p {{.name}}' +1) Delete the existing "{{.name}}" cluster using: '{{.delcommand}}' * or * 2) Start the existing "{{.name}}" cluster using: '{{.command}} 
--driver={{.old}}' -`, out.V{"command": mustload.ExampleCmd(existing.Name, "start"), "old": old, "name": existing.Name}) +`, out.V{"command": mustload.ExampleCmd(existing.Name, "start"), "delcommand": mustload.ExampleCmd(existing.Name, "delete"), "old": old, "name": existing.Name}) exit.WithCodeT(exit.Config, "Exiting.") } From b5e088ad59bb4c320d335633bc0d420223932a6b Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Tue, 24 Mar 2020 13:44:51 -0700 Subject: [PATCH 226/668] run preload on hot restarts as well, in case kubernetes version has been upgraded --- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index a6c1bbe3dd..5de297a003 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -524,17 +524,22 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { return errors.Wrap(err, "kubeadm images") } - if cfg.KubernetesConfig.ShouldLoadCachedImages { - if err := machine.LoadImages(&cfg, k.c, images, constants.ImageCacheDir); err != nil { - out.FailureT("Unable to load cached images: {{.error}}", out.V{"error": err}) - } - } r, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c, Socket: cfg.KubernetesConfig.CRISocket}) if err != nil { return errors.Wrap(err, "runtime") } + if err := r.Preload(cfg.KubernetesConfig); err != nil { + return errors.Wrap(err, "preloading") + } + + if cfg.KubernetesConfig.ShouldLoadCachedImages { + if err := machine.LoadImages(&cfg, k.c, images, constants.ImageCacheDir); err != nil { + out.FailureT("Unable to load cached images: {{.error}}", out.V{"error": err}) + } + } + for _, n := range cfg.Nodes { err := k.UpdateNode(cfg, n, r) if err != nil { From 3ad1dfb4bb3d8e4a016a3aca1585a3b07b4b78e9 Mon Sep 17 00:00:00 2001 From: Vincent Link Date: Tue, 24 Mar 
2020 20:19:13 +0100 Subject: [PATCH 227/668] Add 'stable' and 'latest' as valid kubernetes-version values --- cmd/minikube/cmd/start.go | 22 ++++++++++++---------- cmd/minikube/cmd/start_test.go | 12 +++++++++++- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 01f27f68c4..595058f4f9 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -179,7 +179,7 @@ func initMinikubeFlags() { // initKubernetesFlags inits the commandline flags for kubernetes related options func initKubernetesFlags() { - startCmd.Flags().String(kubernetesVersion, "", "The kubernetes version that the minikube VM will use (ex: v1.2.3)") + startCmd.Flags().String(kubernetesVersion, "", fmt.Sprintf("The kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for %s, 'latest' for %s). Defaults to 'stable'.", constants.DefaultKubernetesVersion, constants.NewestKubernetesVersion)) startCmd.Flags().Var(&config.ExtraOptions, "extra-config", `A set of key=value pairs that describe configuration that may be passed to different components. The key should be '.' separated, and the first part before the dot is the component to apply the configuration to. @@ -480,10 +480,10 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState { if vmd := viper.GetString("vm-driver"); vmd != "" { // Output a warning warning := `Both driver={{.driver}} and vm-driver={{.vmd}} have been set. - + Since vm-driver is deprecated, minikube will default to driver={{.driver}}. - If vm-driver is set in the global config, please run "minikube config unset vm-driver" to resolve this warning. + If vm-driver is set in the global config, please run "minikube config unset vm-driver" to resolve this warning. 
` out.T(out.Warning, warning, out.V{"driver": d, "vmd": vmd}) } @@ -1065,13 +1065,15 @@ func autoSetDriverOptions(cmd *cobra.Command, drvName string) (err error) { func getKubernetesVersion(old *config.ClusterConfig) string { paramVersion := viper.GetString(kubernetesVersion) - if paramVersion == "" { // if the user did not specify any version then ... - if old != nil { // .. use the old version from config (if any) - paramVersion = old.KubernetesConfig.KubernetesVersion - } - if paramVersion == "" { // .. otherwise use the default version - paramVersion = constants.DefaultKubernetesVersion - } + // try to load the old version first if the user didn't specify anything + if paramVersion == "" && old != nil { + paramVersion = old.KubernetesConfig.KubernetesVersion + } + + if paramVersion == "" || strings.EqualFold(paramVersion, "stable") { + paramVersion = constants.DefaultKubernetesVersion + } else if strings.EqualFold(paramVersion, "latest") { + paramVersion = constants.NewestKubernetesVersion } nvs, err := semver.Make(strings.TrimPrefix(paramVersion, version.VersionPrefix)) diff --git a/cmd/minikube/cmd/start_test.go b/cmd/minikube/cmd/start_test.go index ef7e4b7403..0a443aa507 100644 --- a/cmd/minikube/cmd/start_test.go +++ b/cmd/minikube/cmd/start_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/minikube/pkg/minikube/constants" ) -func TestGetKuberneterVersion(t *testing.T) { +func TestGetKubernetesVersion(t *testing.T) { var tests = []struct { description string expectedVersion string @@ -55,6 +55,16 @@ func TestGetKuberneterVersion(t *testing.T) { paramVersion: "v1.16.0", cfg: &cfg.ClusterConfig{KubernetesConfig: cfg.KubernetesConfig{KubernetesVersion: "v1.15.0"}}, }, + { + description: "kubernetes-version given as 'stable', no config", + expectedVersion: constants.DefaultKubernetesVersion, + paramVersion: "stable", + }, + { + description: "kubernetes-version given as 'latest', no config", + expectedVersion: constants.NewestKubernetesVersion, + paramVersion: 
"latest", + }, } for _, test := range tests { From 28672dec786c49417879e3159e3100018db2a5fd Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 24 Mar 2020 14:43:11 -0700 Subject: [PATCH 228/668] remove ClusterNameFromMachine --- cmd/minikube/cmd/status.go | 12 +++++++----- pkg/minikube/driver/driver.go | 10 +--------- pkg/provision/buildroot.go | 4 +++- pkg/provision/provision.go | 8 +++++--- pkg/provision/ubuntu.go | 2 +- 5 files changed, 17 insertions(+), 19 deletions(-) diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index 0db9e57ea8..7b44488e1c 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -31,6 +31,7 @@ import ( "github.com/spf13/cobra" "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify" "k8s.io/minikube/pkg/minikube/cluster" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" @@ -107,7 +108,7 @@ var statusCmd = &cobra.Command{ for _, n := range cc.Nodes { glog.Infof("checking status of %s ...", n.Name) machineName := driver.MachineName(*cc, n) - st, err = status(api, machineName, n.ControlPlane) + st, err = status(api, *cc, n) glog.Infof("%s status: %+v", machineName, st) if err != nil { @@ -150,12 +151,12 @@ func exitCode(st *Status) int { return c } -func status(api libmachine.API, name string, controlPlane bool) (*Status, error) { +func status(api libmachine.API, cc config.ClusterConfig, n config.Node) (*Status, error) { - profile, node := driver.ClusterNameFromMachine(name) + controlPlane := n.ControlPlane st := &Status{ - Name: node, + Name: n.Name, Host: Nonexistent, APIServer: Nonexistent, Kubelet: Nonexistent, @@ -163,6 +164,7 @@ func status(api libmachine.API, name string, controlPlane bool) (*Status, error) Worker: !controlPlane, } + name := driver.MachineName(cc, n) hs, err := machine.Status(api, name) glog.Infof("%s host status = %q (err=%v)", name, hs, err) if err != nil { @@ -205,7 
+207,7 @@ func status(api libmachine.API, name string, controlPlane bool) (*Status, error) } if st.Kubeconfig != Irrelevant { - ok, err := kubeconfig.IsClusterInConfig(ip, profile) + ok, err := kubeconfig.IsClusterInConfig(ip, cc.Name) glog.Infof("%s is in kubeconfig at ip %s: %v (err=%v)", name, ip, ok, err) if ok { st.Kubeconfig = Configured diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index b6106474d8..e064f70799 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -234,13 +234,5 @@ func MachineName(cc config.ClusterConfig, n config.Node) string { if len(cc.Nodes) == 1 || n.ControlPlane { return cc.Name } - return fmt.Sprintf("%s---%s", cc.Name, n.Name) -} - -// ClusterNameFromMachine retrieves the cluster name embedded in the machine name -func ClusterNameFromMachine(name string) (string, string) { - if strings.Contains(name, "---") { - return strings.Split(name, "---")[0], strings.Split(name, "---")[1] - } - return name, name + return fmt.Sprintf("%s-%s", cc.Name, n.Name) } diff --git a/pkg/provision/buildroot.go b/pkg/provision/buildroot.go index 913d8b34d0..1a9e49338f 100644 --- a/pkg/provision/buildroot.go +++ b/pkg/provision/buildroot.go @@ -29,6 +29,8 @@ import ( "github.com/docker/machine/libmachine/provision/pkgaction" "github.com/docker/machine/libmachine/swarm" "github.com/golang/glog" + "github.com/spf13/viper" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/util/retry" ) @@ -180,7 +182,7 @@ func (p *BuildrootProvisioner) Provision(swarmOptions swarm.Options, authOptions } glog.Infof("setting minikube options for container-runtime") - if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p); err != nil { + if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), viper.GetString(config.ProfileName), p); err != nil { glog.Infof("Error setting container-runtime options during provisioning %v", err) return err } diff --git a/pkg/provision/provision.go 
b/pkg/provision/provision.go index 0a297d50d8..4f7aed6cc0 100644 --- a/pkg/provision/provision.go +++ b/pkg/provision/provision.go @@ -36,10 +36,10 @@ import ( "github.com/docker/machine/libmachine/swarm" "github.com/golang/glog" "github.com/pkg/errors" + "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/sshutil" ) @@ -76,6 +76,9 @@ func NewSystemdProvisioner(osReleaseID string, d drivers.Driver) provision.Syste DaemonOptionsFile: "/etc/systemd/system/docker.service.d/10-machine.conf", OsReleaseID: osReleaseID, Driver: d, + SwarmOptions: swarm.Options{ + ArbitraryFlags: []string{viper.GetString(config.ProfileName)}, + }, }, } } @@ -208,8 +211,7 @@ func setRemoteAuthOptions(p provision.Provisioner) auth.Options { return authOptions } -func setContainerRuntimeOptions(name string, p miniProvisioner) error { - cluster, _ := driver.ClusterNameFromMachine(name) +func setContainerRuntimeOptions(name string, cluster string, p miniProvisioner) error { c, err := config.Load(cluster) if err != nil { return errors.Wrap(err, "getting cluster config") diff --git a/pkg/provision/ubuntu.go b/pkg/provision/ubuntu.go index 9d2b272bd2..c0aaf46780 100644 --- a/pkg/provision/ubuntu.go +++ b/pkg/provision/ubuntu.go @@ -185,7 +185,7 @@ func (p *UbuntuProvisioner) Provision(swarmOptions swarm.Options, authOptions au } glog.Infof("setting minikube options for container-runtime") - if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p); err != nil { + if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p.SwarmOptions.ArbitraryFlags[0], p); err != nil { glog.Infof("Error setting container-runtime options during provisioning %v", err) return err } From 1d72a1c8263fe1d40a81d5c3faa24ca532cc7e44 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Tue, 24 Mar 2020 15:01:31 -0700 Subject: [PATCH 229/668] don't extract 
volume if tarball doesn't exist --- pkg/drivers/kic/kic.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/drivers/kic/kic.go b/pkg/drivers/kic/kic.go index 2055f91931..6e05e2de05 100644 --- a/pkg/drivers/kic/kic.go +++ b/pkg/drivers/kic/kic.go @@ -121,6 +121,10 @@ func (d *Driver) Create() error { return errors.Wrap(err, "prepare kic ssh") } + // If preload doesn't exist, don't both extracting tarball to volume + if !download.PreloadExists(d.NodeConfig.KubernetesVersion, d.NodeConfig.ContainerRuntime) { + return nil + } t := time.Now() glog.Infof("Starting extracting preloaded images to volume") // Extract preloaded images to container From 4a24cfcf30ee267507b74d022b3bf1e2ffc5c072 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 24 Mar 2020 15:54:39 -0700 Subject: [PATCH 230/668] add cluster name to provisioner object directly --- pkg/provision/buildroot.go | 4 +++- pkg/provision/provision.go | 4 ---- pkg/provision/ubuntu.go | 5 ++++- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/pkg/provision/buildroot.go b/pkg/provision/buildroot.go index 1a9e49338f..723e24c72f 100644 --- a/pkg/provision/buildroot.go +++ b/pkg/provision/buildroot.go @@ -37,12 +37,14 @@ import ( // BuildrootProvisioner provisions the custom system based on Buildroot type BuildrootProvisioner struct { provision.SystemdProvisioner + clusterName string } // NewBuildrootProvisioner creates a new BuildrootProvisioner func NewBuildrootProvisioner(d drivers.Driver) provision.Provisioner { return &BuildrootProvisioner{ NewSystemdProvisioner("buildroot", d), + viper.GetString(config.ProfileName), } } @@ -182,7 +184,7 @@ func (p *BuildrootProvisioner) Provision(swarmOptions swarm.Options, authOptions } glog.Infof("setting minikube options for container-runtime") - if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), viper.GetString(config.ProfileName), p); err != nil { + if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p.clusterName, p); err != 
nil { glog.Infof("Error setting container-runtime options during provisioning %v", err) return err } diff --git a/pkg/provision/provision.go b/pkg/provision/provision.go index 4f7aed6cc0..19e6f0c43a 100644 --- a/pkg/provision/provision.go +++ b/pkg/provision/provision.go @@ -36,7 +36,6 @@ import ( "github.com/docker/machine/libmachine/swarm" "github.com/golang/glog" "github.com/pkg/errors" - "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" @@ -76,9 +75,6 @@ func NewSystemdProvisioner(osReleaseID string, d drivers.Driver) provision.Syste DaemonOptionsFile: "/etc/systemd/system/docker.service.d/10-machine.conf", OsReleaseID: osReleaseID, Driver: d, - SwarmOptions: swarm.Options{ - ArbitraryFlags: []string{viper.GetString(config.ProfileName)}, - }, }, } } diff --git a/pkg/provision/ubuntu.go b/pkg/provision/ubuntu.go index c0aaf46780..2fc32e67a3 100644 --- a/pkg/provision/ubuntu.go +++ b/pkg/provision/ubuntu.go @@ -29,6 +29,8 @@ import ( "github.com/docker/machine/libmachine/provision/pkgaction" "github.com/docker/machine/libmachine/swarm" "github.com/golang/glog" + "github.com/spf13/viper" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/util/retry" ) @@ -42,6 +44,7 @@ func NewUbuntuProvisioner(d drivers.Driver) provision.Provisioner { return &UbuntuProvisioner{ BuildrootProvisioner{ NewSystemdProvisioner("ubuntu", d), + viper.GetString(config.ProfileName), }, } } @@ -185,7 +188,7 @@ func (p *UbuntuProvisioner) Provision(swarmOptions swarm.Options, authOptions au } glog.Infof("setting minikube options for container-runtime") - if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p.SwarmOptions.ArbitraryFlags[0], p); err != nil { + if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p.clusterName, p); err != nil { glog.Infof("Error setting container-runtime options during provisioning %v", err) return err } From 
06dca4555357990d14e27e4b70066fda5232e9e8 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 24 Mar 2020 16:10:20 -0700 Subject: [PATCH 231/668] lint --- pkg/provision/buildroot.go | 2 +- pkg/provision/provision.go | 4 ++-- pkg/provision/ubuntu.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/provision/buildroot.go b/pkg/provision/buildroot.go index 723e24c72f..2f630f9332 100644 --- a/pkg/provision/buildroot.go +++ b/pkg/provision/buildroot.go @@ -184,7 +184,7 @@ func (p *BuildrootProvisioner) Provision(swarmOptions swarm.Options, authOptions } glog.Infof("setting minikube options for container-runtime") - if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p.clusterName, p); err != nil { + if err := setContainerRuntimeOptions(p.clusterName, p); err != nil { glog.Infof("Error setting container-runtime options during provisioning %v", err) return err } diff --git a/pkg/provision/provision.go b/pkg/provision/provision.go index 19e6f0c43a..7b2e9e6539 100644 --- a/pkg/provision/provision.go +++ b/pkg/provision/provision.go @@ -207,8 +207,8 @@ func setRemoteAuthOptions(p provision.Provisioner) auth.Options { return authOptions } -func setContainerRuntimeOptions(name string, cluster string, p miniProvisioner) error { - c, err := config.Load(cluster) +func setContainerRuntimeOptions(name string, p miniProvisioner) error { + c, err := config.Load(name) if err != nil { return errors.Wrap(err, "getting cluster config") } diff --git a/pkg/provision/ubuntu.go b/pkg/provision/ubuntu.go index 2fc32e67a3..3fbf006b69 100644 --- a/pkg/provision/ubuntu.go +++ b/pkg/provision/ubuntu.go @@ -188,7 +188,7 @@ func (p *UbuntuProvisioner) Provision(swarmOptions swarm.Options, authOptions au } glog.Infof("setting minikube options for container-runtime") - if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p.clusterName, p); err != nil { + if err := setContainerRuntimeOptions(p.clusterName, p); err != nil { glog.Infof("Error setting 
container-runtime options during provisioning %v", err) return err } From 0e897f8448bfb79b8d58b20b7b266b3dd54a808e Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Tue, 24 Mar 2020 16:25:57 -0700 Subject: [PATCH 232/668] Only run preload if it exists --- pkg/minikube/cruntime/docker.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/minikube/cruntime/docker.go b/pkg/minikube/cruntime/docker.go index 8641092573..843c7529f8 100644 --- a/pkg/minikube/cruntime/docker.go +++ b/pkg/minikube/cruntime/docker.go @@ -290,6 +290,9 @@ func (r *Docker) SystemLogCmd(len int) string { // 2. Extract the preloaded tarball to the correct directory // 3. Remove the tarball within the VM func (r *Docker) Preload(cfg config.KubernetesConfig) error { + if !download.PreloadExists(cfg.KubernetesVersion, cfg.ContainerRuntime) { + return nil + } k8sVersion := cfg.KubernetesVersion // If images already exist, return From 744f76e3b677f7164dbdbc5d7b60d3c328de9376 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Tue, 24 Mar 2020 17:25:27 -0700 Subject: [PATCH 233/668] fix lint --- test/integration/aaa_download_only_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/integration/aaa_download_only_test.go b/test/integration/aaa_download_only_test.go index c6c1c203ef..dc76967ce1 100644 --- a/test/integration/aaa_download_only_test.go +++ b/test/integration/aaa_download_only_test.go @@ -74,7 +74,7 @@ func TestDownloadOnly(t *testing.T) { if !NoneDriver() { if download.PreloadExists(v, r) { // Just make sure the tarball path exists - if _, err := os.Stat(download.TarballPath(v)); err != nil { + if _, err := os.Stat(download.TarballPath(v, r)); err != nil { t.Errorf("preloaded tarball path doesn't exist: %v", err) } return @@ -154,6 +154,8 @@ func TestDownloadOnlyKic(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), Minutes(15)) defer Cleanup(t, profile, cancel) + cRuntime := "docker" + args := []string{"start", "--download-only", "-p", 
profile, "--force", "--alsologtostderr"} args = append(args, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) From 455ebeac0130a245e524b385917bd24737cbf7fd Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Tue, 24 Mar 2020 17:28:57 -0700 Subject: [PATCH 234/668] update test, don't need to rerun v1.17.0 since those images exist anyway --- test/integration/start_stop_delete_test.go | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/test/integration/start_stop_delete_test.go b/test/integration/start_stop_delete_test.go index f0aeb03450..e948f4b6ef 100644 --- a/test/integration/start_stop_delete_test.go +++ b/test/integration/start_stop_delete_test.go @@ -192,17 +192,6 @@ func TestStartStopWithPreload(t *testing.T) { t.Fatalf("%s failed: %v", rr.Args, err) } - // Restart again with v1.17.0, this time with the preloaded tarball - startArgs = []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true"} - startArgs = append(startArgs, StartArgs()...) - startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", k8sVersion)) - - rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) - if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) - } - verifyImageExistsInDaemon(ctx, t, profile, image) - // Restart minikube with v1.17.3, which has a preloaded tarball startArgs = []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true"} startArgs = append(startArgs, StartArgs()...) 
@@ -212,13 +201,7 @@ func TestStartStopWithPreload(t *testing.T) { if err != nil { t.Fatalf("%s failed: %v", rr.Args, err) } - verifyImageExistsInDaemon(ctx, t, profile, image) - -} - -func verifyImageExistsInDaemon(ctx context.Context, t *testing.T, profile, image string) { - // Ensure that busybox still exists in the daemon - rr, err := Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "--", "docker", "images")) + rr, err = Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "--", "docker", "images")) if err != nil { t.Fatalf("%s failed: %v", rr.Args, err) } From ab4b7d92b380b477801280289aaf2531454e11e9 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Tue, 24 Mar 2020 17:36:04 -0700 Subject: [PATCH 235/668] Flush logs before writing to stdout --- pkg/minikube/out/out.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/minikube/out/out.go b/pkg/minikube/out/out.go index 9ecb23c053..38c817339b 100644 --- a/pkg/minikube/out/out.go +++ b/pkg/minikube/out/out.go @@ -68,6 +68,9 @@ func T(style StyleEnum, format string, a ...V) { // String writes a basic formatted string to stdout func String(format string, a ...interface{}) { + // Flush log buffer so that output order makes sense + glog.Flush() + if outFile == nil { glog.Warningf("[unset outFile]: %s", fmt.Sprintf(format, a...)) return From d3fcd40feba97e809d115839bc45be45831730a0 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Tue, 24 Mar 2020 17:37:59 -0700 Subject: [PATCH 236/668] Use correct preload paths on Windows, announce base image pull --- pkg/minikube/download/preload.go | 12 ++++++------ pkg/minikube/node/cache.go | 1 + 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/pkg/minikube/download/preload.go b/pkg/minikube/download/preload.go index 09e0d8d35e..4a4979acc0 100644 --- a/pkg/minikube/download/preload.go +++ b/pkg/minikube/download/preload.go @@ -23,7 +23,7 @@ import ( "io/ioutil" "net/http" "os" - "path" + "path/filepath" 
"cloud.google.com/go/storage" "google.golang.org/api/option" @@ -59,14 +59,14 @@ func targetDir() string { return localpath.MakeMiniPath("cache", "preloaded-tarball") } -// PreloadChecksumPath returns path to checksum file +// PreloadChecksumPath returns the local path to the cached checksum file func PreloadChecksumPath(k8sVersion string) string { - return path.Join(targetDir(), checksumName(k8sVersion)) + return filepath.Join(targetDir(), checksumName(k8sVersion)) } -// TarballPath returns the path to the preloaded tarball +// TarballPath returns the local path to the cached preload tarball func TarballPath(k8sVersion string) string { - return path.Join(targetDir(), TarballName(k8sVersion)) + return filepath.Join(targetDir(), TarballName(k8sVersion)) } // remoteTarballURL returns the URL for the remote tarball in GCS @@ -122,7 +122,7 @@ func Preload(k8sVersion, containerRuntime string) error { return nil } - out.T(out.FileDownload, "Downloading preloaded images tarball for k8s {{.version}} ...", out.V{"version": k8sVersion}) + out.T(out.FileDownload, "Downloading Kubernetes {{.version}} preload ...", out.V{"version": k8sVersion}) url := remoteTarballURL(k8sVersion) tmpDst := targetPath + ".download" diff --git a/pkg/minikube/node/cache.go b/pkg/minikube/node/cache.go index f1b3ac8f32..ebeb04fdc0 100644 --- a/pkg/minikube/node/cache.go +++ b/pkg/minikube/node/cache.go @@ -100,6 +100,7 @@ func doCacheBinaries(k8sVersion string) error { // BeginDownloadKicArtifacts downloads the kic image + preload tarball, returns true if preload is available func beginDownloadKicArtifacts(g *errgroup.Group) { + out.T(out.Pulling, "Pulling base image ...") glog.Info("Beginning downloading kic artifacts") g.Go(func() error { glog.Infof("Downloading %s to local daemon", kic.BaseImage) From 2e9054f495ddc99cecf2c80daef1dbe7a99ac641 Mon Sep 17 00:00:00 2001 From: ZouYu Date: Wed, 25 Mar 2020 11:08:27 +0800 Subject: [PATCH 237/668] add unit test for pkg/drivers/hyperkit/iso.go 
Signed-off-by: ZouYu --- pkg/drivers/hyperkit/iso_test.go | 91 ++++++++++++++++++++++++++++++ pkg/drivers/hyperkit/iso_test.iso | Bin 0 -> 362496 bytes 2 files changed, 91 insertions(+) create mode 100644 pkg/drivers/hyperkit/iso_test.go create mode 100644 pkg/drivers/hyperkit/iso_test.iso diff --git a/pkg/drivers/hyperkit/iso_test.go b/pkg/drivers/hyperkit/iso_test.go new file mode 100644 index 0000000000..8dda1e4a6e --- /dev/null +++ b/pkg/drivers/hyperkit/iso_test.go @@ -0,0 +1,91 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package hyperkit + +import ( + "io/ioutil" + "os" + "testing" +) + +func TestExtractFile(t *testing.T) { + testDir, err := ioutil.TempDir(os.TempDir(), "") + if nil != err { + return + } + defer os.Remove(testDir) + + tests := []struct { + name string + isoPath string + srcPath string + destPath string + expectedError bool + }{ + { + name: "all is right", + isoPath: "iso_test.iso", + srcPath: "/test1.txt", + destPath: testDir + "/test1.txt", + expectedError: false, + }, + { + name: "isoPath is error", + isoPath: "tests.iso", + srcPath: "/test1.txt", + destPath: testDir + "/test1.txt", + expectedError: true, + }, + { + name: "srcPath is empty", + isoPath: "iso_tests.iso", + srcPath: "", + destPath: testDir + "/test1.txt", + expectedError: true, + }, + { + name: "srcPath is error", + isoPath: "iso_tests.iso", + srcPath: "/t1.txt", + destPath: testDir + "/test1.txt", + expectedError: true, + }, + { + name: "destPath is empty", + isoPath: "iso_test.iso", + srcPath: "/test1.txt", + destPath: "", + expectedError: true, + }, + { + name: "find files in a folder", + isoPath: "./iso_test.iso", + srcPath: "/test2/test2.txt", + destPath: testDir + "/test2.txt", + expectedError: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := ExtractFile(tt.isoPath, tt.srcPath, tt.destPath) + if (nil != err) != tt.expectedError { + t.Errorf("expectedError = %v, get = %v", tt.expectedError, err) + return + } + }) + } +} diff --git a/pkg/drivers/hyperkit/iso_test.iso b/pkg/drivers/hyperkit/iso_test.iso new file mode 100644 index 0000000000000000000000000000000000000000..dbed69dd413fd8b02f0f019e33ac700e6f75d699 GIT binary patch literal 362496 zcmeI(+fEZf7y#f|4Wu5pQp4fGOt>(bVA-|=(sot@pze!J2-Y%)CV4T@MbVt02p-#$E;#e?Cf zn4Qha;yCuFMX#JpEwAHZ*)|h*GjlCA`A1G zo3s8e0`-5IErCZM@arP}KZ2>40s#U92oNAZfB*pk1PBlyFeicRe2u+YO%F@w>+o49 zi&@#8)2}%#0RjXF5FkK+009C72oNCf5CZAVzghyQs-@Ig3z_%-1fdWhK!5-N0t5&U zAV7csfkhIyJIC&p5pS=u@UpG2UX!dZ*Y$GB(|frbt8Xb!??qZtXf?~z@>A>GRkq*X 
zbNZGnKh%b{1PBlyKwypnn|H+H?Cvc5D*kWOGu*fq|6j+d_bUFk>-ay%OL1xf1PBly zK;WJN-@ktQ-g Date: Tue, 24 Mar 2020 20:12:49 -0700 Subject: [PATCH 238/668] Use runtime.GOARCH since we want amd64, not runtime.GOOS --- pkg/minikube/download/preload.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/download/preload.go b/pkg/minikube/download/preload.go index 0fe1ea7778..fdf5fd43bd 100644 --- a/pkg/minikube/download/preload.go +++ b/pkg/minikube/download/preload.go @@ -47,7 +47,7 @@ const ( // TarballName returns name of the tarball func TarballName(k8sVersion, containerRuntime string) string { - return fmt.Sprintf("preloaded-images-k8s-%s-%s-%s-overlay2-%s.tar.lz4", PreloadVersion, k8sVersion, containerRuntime, runtime.GOOS) + return fmt.Sprintf("preloaded-images-k8s-%s-%s-%s-overlay2-%s.tar.lz4", PreloadVersion, k8sVersion, containerRuntime, runtime.GOARCH) } // returns the name of the checksum file From 646609ec30fcd5aa4f0e7c0be029bbad46873e55 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Tue, 24 Mar 2020 21:24:43 -0700 Subject: [PATCH 239/668] review comments --- cmd/minikube/cmd/start.go | 2 +- pkg/minikube/download/preload.go | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 7d6e92cbf6..ef568571ba 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -176,7 +176,7 @@ func initMinikubeFlags() { startCmd.Flags().Bool(autoUpdate, true, "If set, automatically updates drivers to the latest version. Defaults to true.") startCmd.Flags().Bool(installAddons, true, "If set, install addons. Defaults to true.") startCmd.Flags().IntP(nodes, "n", 1, "The number of nodes to spin up. Defaults to 1.") - startCmd.Flags().Bool(preload, true, "If true, download tarball of preloaded images if available to improve start time.") + startCmd.Flags().Bool(preload, true, "If set, download tarball of preloaded images if available to improve start time. 
Defaults to true.") } // initKubernetesFlags inits the commandline flags for kubernetes related options diff --git a/pkg/minikube/download/preload.go b/pkg/minikube/download/preload.go index 0291a15b93..9016371d68 100644 --- a/pkg/minikube/download/preload.go +++ b/pkg/minikube/download/preload.go @@ -81,6 +81,9 @@ func PreloadExists(k8sVersion, containerRuntime string) bool { return false } + // See https://github.com/kubernetes/minikube/issues/6933 + // and https://github.com/kubernetes/minikube/issues/6934 + // to track status of adding containerd & crio if containerRuntime != "docker" { return false } From 1be46bceec192a87b0525039d8d91076389505cc Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 01:12:27 -0700 Subject: [PATCH 240/668] implement Preload check for crio and containerd --- pkg/minikube/cruntime/containerd.go | 4 ++++ pkg/minikube/cruntime/crio.go | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/pkg/minikube/cruntime/containerd.go b/pkg/minikube/cruntime/containerd.go index 67beb710a7..89a2e3912a 100644 --- a/pkg/minikube/cruntime/containerd.go +++ b/pkg/minikube/cruntime/containerd.go @@ -30,6 +30,7 @@ import ( "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/out" ) @@ -313,5 +314,8 @@ func (r *Containerd) SystemLogCmd(len int) string { // Preload preloads the container runtime with k8s images func (r *Containerd) Preload(cfg config.KubernetesConfig) error { + if !download.PreloadExists(cfg.KubernetesVersion, cfg.ContainerRuntime) { + return nil + } return fmt.Errorf("not yet implemented for %s", r.Name()) } diff --git a/pkg/minikube/cruntime/crio.go b/pkg/minikube/cruntime/crio.go index fff4a8c270..5678a9d6a4 100644 --- a/pkg/minikube/cruntime/crio.go +++ b/pkg/minikube/cruntime/crio.go @@ -26,6 +26,7 @@ import ( "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/bootstrapper/images" 
"k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/out" ) @@ -230,5 +231,8 @@ func (r *CRIO) SystemLogCmd(len int) string { // Preload preloads the container runtime with k8s images func (r *CRIO) Preload(cfg config.KubernetesConfig) error { + if !download.PreloadExists(cfg.KubernetesVersion, cfg.ContainerRuntime) { + return nil + } return fmt.Errorf("not yet implemented for %s", r.Name()) } From f42572f29ce69f7b015cd0d00c623dafe9c13a88 Mon Sep 17 00:00:00 2001 From: Marcin Niemira Date: Wed, 25 Mar 2020 22:44:40 +1100 Subject: [PATCH 241/668] Add TestMiniPath test --- pkg/minikube/localpath/localpath.go | 9 +++--- pkg/minikube/localpath/localpath_test.go | 39 ++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 4 deletions(-) diff --git a/pkg/minikube/localpath/localpath.go b/pkg/minikube/localpath/localpath.go index 6bc9ef1239..1ac1172b6a 100644 --- a/pkg/minikube/localpath/localpath.go +++ b/pkg/minikube/localpath/localpath.go @@ -38,13 +38,14 @@ func ConfigFile() string { // MiniPath returns the path to the user's minikube dir func MiniPath() string { - if os.Getenv(MinikubeHome) == "" { + minikubeHomeEnv := os.Getenv(MinikubeHome) + if minikubeHomeEnv == "" { return filepath.Join(homedir.HomeDir(), ".minikube") } - if filepath.Base(os.Getenv(MinikubeHome)) == ".minikube" { - return os.Getenv(MinikubeHome) + if filepath.Base(minikubeHomeEnv) == ".minikube" { + return minikubeHomeEnv } - return filepath.Join(os.Getenv(MinikubeHome), ".minikube") + return filepath.Join(minikubeHomeEnv, ".minikube") } // MakeMiniPath is a utility to calculate a relative path to our directory. diff --git a/pkg/minikube/localpath/localpath_test.go b/pkg/minikube/localpath/localpath_test.go index 173ca5df88..81b40722dc 100644 --- a/pkg/minikube/localpath/localpath_test.go +++ b/pkg/minikube/localpath/localpath_test.go @@ -17,10 +17,15 @@ limitations under the License. 
package localpath import ( + "fmt" "io/ioutil" "os" + "path/filepath" "runtime" + "strings" "testing" + + "k8s.io/client-go/util/homedir" ) func TestReplaceWinDriveLetterToVolumeName(t *testing.T) { @@ -61,3 +66,37 @@ func TestHasWindowsDriveLetter(t *testing.T) { } } } + +func TestConfigFile(t *testing.T) { + configFile := ConfigFile() + if !strings.Contains(configFile, "config.json") { + t.Errorf("ConfigFile returned path without 'config.json': %s", configFile) + } +} + +func TestMiniPath(t *testing.T) { + var testCases = []struct { + env, basePath string + }{ + {"/tmp/.minikube", "/tmp/"}, + {"/tmp/", "/tmp"}, + {"", homedir.HomeDir()}, + } + for _, tc := range testCases { + originalEnv := os.Getenv(MinikubeHome) + defer func() { // revert to pre-test env var + err := os.Setenv(MinikubeHome, originalEnv) + if err != nil { + t.Fatalf("Error reverting env %s to its original value (%s) var after test ", MinikubeHome, originalEnv) + } + }() + t.Run(fmt.Sprintf("%s", tc.env), func(t *testing.T) { + expectedPath := filepath.Join(tc.basePath, ".minikube") + os.Setenv(MinikubeHome, tc.env) + path := MiniPath() + if path != expectedPath { + t.Errorf("MiniPath expected to return '%s', but got '%s'", expectedPath, path) + } + }) + } +} From 4383a4883c8a5050097c9568fa7a1740ea152a4d Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 25 Mar 2020 07:28:23 -0700 Subject: [PATCH 242/668] Improve host recreation experience --- pkg/minikube/machine/delete.go | 33 +++++++++++++++++++++++ pkg/minikube/machine/fix.go | 48 ++++++++++++++-------------------- pkg/minikube/out/style.go | 1 - pkg/minikube/out/style_enum.go | 1 - 4 files changed, 52 insertions(+), 31 deletions(-) diff --git a/pkg/minikube/machine/delete.go b/pkg/minikube/machine/delete.go index c51a9d0931..136d232ac4 100644 --- a/pkg/minikube/machine/delete.go +++ b/pkg/minikube/machine/delete.go @@ -22,11 +22,13 @@ import ( "time" "github.com/docker/machine/libmachine" + "github.com/docker/machine/libmachine/host" 
"github.com/docker/machine/libmachine/mcnerror" "github.com/docker/machine/libmachine/state" "github.com/golang/glog" "github.com/pkg/errors" "k8s.io/minikube/pkg/drivers/kic/oci" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/out" ) @@ -100,3 +102,34 @@ func DeleteHost(api libmachine.API, machineName string) error { } return nil } + +// destroy demolishes a host by any means necessary +// Use only when the machine state appears to be inconsistent +func destroy(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) { + machineName := driver.MachineName(cc, n) + glog.Infof("destroying %s ...", machineName) + + // First try using the official friendly API's. + + // This will probably fail + err := StopHost(api, machineName) + if err != nil { + glog.Infof("stophost failed: %v", err) + } + + // For 95% of cases, this should be enough + err = DeleteHost(api, machineName) + if err != nil { + glog.Warningf("deletehost failed: %v", err) + } + + // DeleteHost may have returned success prematurely. Go further. + if err = h.Driver.Remove(); err != nil { + glog.Warningf("driver remove failed: %v", err) + } + + // Clean up the local files relating to this machine + if err = api.Remove(cc.Name); err != nil { + glog.Warningf("api remove failed: %v", err) + } +} diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 9575f92294..943bc7d8b0 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -54,8 +54,6 @@ var ( // fixHost fixes up a previously configured VM so that it is ready to run Kubernetes func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, error) { - out.T(out.Waiting, "Reconfiguring existing host ...") - start := time.Now() glog.Infof("fixHost starting: %s", n.Name) defer func() { @@ -101,43 +99,35 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host. 
} func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) (*host.Host, error) { + machineName := driver.MachineName(cc, n) + machineType := driver.MachineType(cc.Driver) + s, err := h.Driver.GetState() + glog.Infof("recreateIfNeeded on %s: state=%s err=%v", machineName, s, err) if err != nil || s == state.Stopped || s == state.None { // If virtual machine does not exist due to user interrupt cancel(i.e. Ctrl + C), recreate virtual machine me, err := machineExists(h.Driver.DriverName(), s, err) - if !me { - // If the error is that virtual machine does not exist error, handle error(recreate virtual machine) - if err == ErrorMachineNotExist { - // remove virtual machine - if err := h.Driver.Remove(); err != nil { - // skip returning error since it may be before docker image pulling(so, no host exist) - if h.Driver.DriverName() != driver.Docker { - return nil, errors.Wrap(err, "host remove") - } - } - // remove machine config directory - if err := api.Remove(cc.Name); err != nil { - return nil, errors.Wrap(err, "api remove") - } - // recreate virtual machine - out.T(out.Meh, "machine '{{.name}}' does not exist. 
Proceeding ahead with recreating VM.", out.V{"name": cc.Name}) - h, err = createHost(api, cc, n) - if err != nil { - return nil, errors.Wrap(err, "Error recreating VM") - } - // return ErrMachineNotExist err to initialize preExists flag - return h, ErrorMachineNotExist + glog.Infof("exists: %v err=%v", me, err) + + if !me || err == ErrorMachineNotExist { + out.T(out.Provisioning, `Recreating {{.driver_name}} "{{.cluster}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) + destroy(api, cc, n, h) + time.Sleep(1 * time.Second) + h, err = createHost(api, cc, n) + if err != nil { + return nil, errors.Wrap(err, "recreate") + } + s, err = h.Driver.GetState() + if err != nil { + return nil, errors.Wrap(err, "recreated state") } - // If the error is not that virtual machine does not exist error, return error - return nil, errors.Wrap(err, "Error getting state for host") } } - machineType := driver.MachineType(cc.Driver) if s == state.Running { - out.T(out.Running, `Using the running {{.driver_name}} "{{.profile_name}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "profile_name": cc.Name, "machine_type": machineType}) + out.T(out.Running, `Updating the running {{.driver_name}} "{{.cluster}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) } else { - out.T(out.Restarting, `Starting existing {{.driver_name}} {{.machine_type}} for "{{.profile_name}}" ...`, out.V{"driver_name": cc.Driver, "profile_name": cc.Name, "machine_type": machineType}) + out.T(out.Restarting, `Retarting existing {{.driver_name}} {{.machine_type}} for "{{.cluster}}" ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) if err := h.Driver.Start(); err != nil { return h, errors.Wrap(err, "driver start") } diff --git a/pkg/minikube/out/style.go b/pkg/minikube/out/style.go index 150a7af30d..2ae3f25304 100644 --- a/pkg/minikube/out/style.go +++ 
b/pkg/minikube/out/style.go @@ -60,7 +60,6 @@ var styles = map[StyleEnum]style{ Running: {Prefix: "🏃 "}, Provisioning: {Prefix: "🌱 "}, Restarting: {Prefix: "🔄 "}, - Reconfiguring: {Prefix: "📯 "}, Stopping: {Prefix: "✋ "}, Stopped: {Prefix: "🛑 "}, Warning: {Prefix: "❗ ", LowPrefix: lowWarning}, diff --git a/pkg/minikube/out/style_enum.go b/pkg/minikube/out/style_enum.go index 747c277faf..097f4452dc 100644 --- a/pkg/minikube/out/style_enum.go +++ b/pkg/minikube/out/style_enum.go @@ -32,7 +32,6 @@ const ( Running Provisioning Restarting - Reconfiguring Stopping Stopped Warning From d33684d7eb68e0684f0b35b32f683ceb03563526 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 25 Mar 2020 08:10:08 -0700 Subject: [PATCH 243/668] Make private stop/delete functions to avoid wheel reinvention --- pkg/minikube/machine/delete.go | 31 +++++++++++------------- pkg/minikube/machine/fix.go | 43 ++++++++++++++++++++-------------- pkg/minikube/machine/stop.go | 17 +++++++++----- 3 files changed, 50 insertions(+), 41 deletions(-) diff --git a/pkg/minikube/machine/delete.go b/pkg/minikube/machine/delete.go index 136d232ac4..69b493ac26 100644 --- a/pkg/minikube/machine/delete.go +++ b/pkg/minikube/machine/delete.go @@ -87,11 +87,16 @@ func DeleteHost(api libmachine.API, machineName string) error { } out.T(out.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": host.DriverName}) - if err := host.Driver.Remove(); err != nil { + return delete(api, host, machineName) +} + +// delete removes a host and it's local data files +func delete(api libmachine.API, h *host.Host, machineName string) error { + if err := h.Driver.Remove(); err != nil { glog.Warningf("remove failed, will retry: %v", err) time.Sleep(2 * time.Second) - nerr := host.Driver.Remove() + nerr := h.Driver.Remove() if nerr != nil { return errors.Wrap(nerr, "host remove retry") } @@ -103,18 +108,17 @@ func DeleteHost(api libmachine.API, machineName string) 
error { return nil } -// destroy demolishes a host by any means necessary -// Use only when the machine state appears to be inconsistent -func destroy(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) { +// demolish destroys a host by any means necessary - use only if state is inconsistent +func demolish(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) { machineName := driver.MachineName(cc, n) glog.Infof("destroying %s ...", machineName) - // First try using the official friendly API's. + // First try using the friendly API's. // This will probably fail - err := StopHost(api, machineName) + err := stop(h) if err != nil { - glog.Infof("stophost failed: %v", err) + glog.Infof("stophost failed (probably ok): %v", err) } // For 95% of cases, this should be enough @@ -123,13 +127,6 @@ func destroy(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host glog.Warningf("deletehost failed: %v", err) } - // DeleteHost may have returned success prematurely. Go further. - if err = h.Driver.Remove(); err != nil { - glog.Warningf("driver remove failed: %v", err) - } - - // Clean up the local files relating to this machine - if err = api.Remove(cc.Name); err != nil { - glog.Warningf("api remove failed: %v", err) - } + err = delete(api, h, machineName) + glog.Warningf("delete failed (probably ok) %v", err) } diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 943bc7d8b0..cc8e8e8271 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -101,41 +101,48 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host. 
func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) (*host.Host, error) { machineName := driver.MachineName(cc, n) machineType := driver.MachineType(cc.Driver) + recreated := false + s, serr := h.Driver.GetState() - s, err := h.Driver.GetState() - glog.Infof("recreateIfNeeded on %s: state=%s err=%v", machineName, s, err) - if err != nil || s == state.Stopped || s == state.None { + glog.Infof("recreateIfNeeded on %s: state=%s err=%v", machineName, s, serr) + if serr != nil || s == state.Stopped || s == state.None { // If virtual machine does not exist due to user interrupt cancel(i.e. Ctrl + C), recreate virtual machine - me, err := machineExists(h.Driver.DriverName(), s, err) + me, err := machineExists(h.Driver.DriverName(), s, serr) glog.Infof("exists: %v err=%v", me, err) if !me || err == ErrorMachineNotExist { - out.T(out.Provisioning, `Recreating {{.driver_name}} "{{.cluster}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) - destroy(api, cc, n, h) + out.T(out.Shrug, `{{.driver_name}} "{{.cluster}}" {{.machine_type}} is missing, will recreate.`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) + demolish(api, cc, n, h) time.Sleep(1 * time.Second) h, err = createHost(api, cc, n) if err != nil { return nil, errors.Wrap(err, "recreate") } - s, err = h.Driver.GetState() - if err != nil { - return nil, errors.Wrap(err, "recreated state") - } + recreated = true + s, serr = h.Driver.GetState() } } + if serr != ErrorMachineNotExist { + glog.Warningf("unexpected machine state, will restart: %v", serr) + } + if s == state.Running { - out.T(out.Running, `Updating the running {{.driver_name}} "{{.cluster}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) - } else { - out.T(out.Restarting, `Retarting existing {{.driver_name}} {{.machine_type}} for "{{.cluster}}" ...`, 
out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) - if err := h.Driver.Start(); err != nil { - return h, errors.Wrap(err, "driver start") - } - if err := api.Save(h); err != nil { - return h, errors.Wrap(err, "save") + if !recreated { + out.T(out.Running, `Updating the running {{.driver_name}} "{{.cluster}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) } + return h, nil } + if !recreated { + out.T(out.Restarting, `Retarting existing {{.driver_name}} {{.machine_type}} for "{{.cluster}}" ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) + } + if err := h.Driver.Start(); err != nil { + return h, errors.Wrap(err, "driver start") + } + if err := api.Save(h); err != nil { + return h, errors.Wrap(err, "save") + } return h, nil } diff --git a/pkg/minikube/machine/stop.go b/pkg/minikube/machine/stop.go index 97931a2b00..5c4ba4e671 100644 --- a/pkg/minikube/machine/stop.go +++ b/pkg/minikube/machine/stop.go @@ -30,25 +30,30 @@ import ( // StopHost stops the host VM, saving state to disk. 
func StopHost(api libmachine.API, machineName string) error { - host, err := api.Load(machineName) + h, err := api.Load(machineName) if err != nil { return errors.Wrapf(err, "load") } - out.T(out.Stopping, `Stopping "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": host.DriverName}) - if host.DriverName == driver.HyperV { + out.T(out.Stopping, `Stopping "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": h.DriverName}) + return stop(h) +} + +// stop forcibly stops a host without needing to load +func stop(h *host.Host) error { + if h.DriverName == driver.HyperV { glog.Infof("As there are issues with stopping Hyper-V VMs using API, trying to shut down using SSH") - if err := trySSHPowerOff(host); err != nil { + if err := trySSHPowerOff(h); err != nil { return errors.Wrap(err, "ssh power off") } } - if err := host.Stop(); err != nil { + if err := h.Stop(); err != nil { alreadyInStateError, ok := err.(mcnerror.ErrHostAlreadyInState) if ok && alreadyInStateError.State == state.Stopped { return nil } - return &retry.RetriableError{Err: errors.Wrapf(err, "Stop: %s", machineName)} + return &retry.RetriableError{Err: errors.Wrap(err, "stop")} } return nil } From 8afe879c56b56658526dea5d9b20e1e33634f7b6 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Wed, 25 Mar 2020 10:09:22 -0700 Subject: [PATCH 244/668] remove unnecessary check; we check this when cr.Preload is called --- pkg/minikube/node/start.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 9e90bc747f..5f7b131a6e 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -42,7 +42,6 @@ import ( "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" - "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" 
"k8s.io/minikube/pkg/minikube/kubeconfig" @@ -198,7 +197,7 @@ func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config } // Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere. - if driver.IsVM(drvName) && download.PreloadExists(k8s.KubernetesVersion, k8s.ContainerRuntime) { + if driver.IsVM(drvName) { if err := cr.Preload(k8s); err != nil { switch err.(type) { case *cruntime.ErrISOFeature: From a8d63f7fa55423c81eb9947076257363eab09273 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 25 Mar 2020 10:24:57 -0700 Subject: [PATCH 245/668] none: Skip checkHelmTiller if socat is not installed --- test/integration/addons_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/test/integration/addons_test.go b/test/integration/addons_test.go index a784f381bf..3e001cd072 100644 --- a/test/integration/addons_test.go +++ b/test/integration/addons_test.go @@ -267,9 +267,17 @@ func validateHelmTillerAddon(ctx context.Context, t *testing.T, profile string) t.Fatalf("wait: %v", err) } + if NoneDriver() { + _, err := exec.LookPath("socat") + if err != nil { + t.Skipf("socat is required by kubectl to complete this test") + } + } + want := "Server: &version.Version" // Test from inside the cluster (`helm version` use pod.list permission. 
we use tiller serviceaccount in kube-system to list pod) checkHelmTiller := func() error { + rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "run", "--rm", "helm-test", "--restart=Never", "--image=alpine/helm:2.16.3", "-it", "--namespace=kube-system", "--serviceaccount=tiller", "--", "version")) if err != nil { return err From 442a7869d3bf9d6e5689afcf5dbb6062c42392ef Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 25 Mar 2020 12:48:51 -0700 Subject: [PATCH 246/668] Re-initalize failed Kubernetes clusters --- go.sum | 2 - pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 104 +++++++++++++------ 2 files changed, 75 insertions(+), 31 deletions(-) diff --git a/go.sum b/go.sum index d7d0bc3a61..15a7b48dae 100644 --- a/go.sum +++ b/go.sum @@ -421,8 +421,6 @@ github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= -github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345 h1:XP1VL9iOZu4yz/rq8zj+yvB23XEY5erXRzp8JYmkWu0= -github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU= github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f h1:tL0xH80QVHQOde6Qqdohv6PewABH8l8N9pywZtuojJ0= github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 5de297a003..9308439bdd 100644 --- 
a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -51,6 +51,7 @@ import ( "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/driver" + "k8s.io/minikube/pkg/minikube/kubelet" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/vmpath" @@ -129,7 +130,7 @@ func (k *Bootstrapper) LogCommands(cfg config.ClusterConfig, o bootstrapper.LogO dmesg.WriteString(fmt.Sprintf(" | tail -n %d", o.Lines)) } - describeNodes := fmt.Sprintf("sudo %s describe node -A --kubeconfig=%s", + describeNodes := fmt.Sprintf("sudo %s describe nodes --kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl"), path.Join(vmpath.GuestPersistentDir, "kubeconfig")) @@ -181,20 +182,7 @@ func (k *Bootstrapper) clearStaleConfigs(cfg config.ClusterConfig) error { return nil } -// StartCluster starts the cluster -func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { - err := bsutil.ExistingConfig(k.c) - if err == nil { // if there is an existing cluster don't reconfigure it - return k.restartCluster(cfg) - } - glog.Infof("existence check: %v", err) - - start := time.Now() - glog.Infof("StartCluster: %+v", cfg) - defer func() { - glog.Infof("StartCluster complete in %s", time.Since(start)) - }() - +func (k *Bootstrapper) init(cfg config.ClusterConfig) error { version, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion) if err != nil { return errors.Wrap(err, "parsing kubernetes version") @@ -237,10 +225,10 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { } conf := bsutil.KubeadmYamlPath - c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo mv %s.new %s && %s init --config %s %s --ignore-preflight-errors=%s", conf, conf, bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), conf, extraFlags, strings.Join(ignore, ","))) 
- rr, err := k.c.RunCmd(c) - if err != nil { - return errors.Wrapf(err, "init failed. output: %q", rr.Output()) + c := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", + bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), conf, extraFlags, strings.Join(ignore, ","))) + if _, err := k.c.RunCmd(c); err != nil { + return errors.Wrap(err, "run") } if cfg.Driver == driver.Docker { @@ -258,10 +246,47 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { } if err := k.elevateKubeSystemPrivileges(cfg); err != nil { - glog.Warningf("unable to create cluster role binding, some addons might not work : %v. ", err) + glog.Warningf("unable to create cluster role binding, some addons might not work: %v", err) + } + return nil +} + +// StartCluster starts the cluster +func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { + start := time.Now() + glog.Infof("StartCluster: %+v", cfg) + defer func() { + glog.Infof("StartCluster complete in %s", time.Since(start)) + }() + + if err := bsutil.ExistingConfig(k.c); err == nil { + glog.Infof("found existing configuration files, will attempt cluster restart") + rerr := k.restartCluster(cfg) + if rerr == nil { + return nil + } + out.T(out.Embarrassed, "Unable to restart cluster, will reset it: {{.error}}", out.V{"error": rerr}) + if err := k.DeleteCluster(cfg.KubernetesConfig); err != nil { + glog.Warningf("delete failed: %v", err) + } + // Fall-through to init } - return nil + conf := bsutil.KubeadmYamlPath + if _, err := k.c.RunCmd(exec.Command("sudo", "cp", conf+".new", conf)); err != nil { + return errors.Wrap(err, "cp") + } + + err := k.init(cfg) + if err == nil { + return nil + } + + out.T(out.Conflict, "initialization failed, will try again: {{.error}}", out.V{"error": err}) + if err := k.DeleteCluster(cfg.KubernetesConfig); err != nil { + glog.Warningf("delete failed: %v", err) + } + return k.init(cfg) } func (k *Bootstrapper) 
controlPlaneEndpoint(cfg config.ClusterConfig) (string, int, error) { @@ -410,8 +435,8 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { return errors.Wrap(err, "clearing stale configs") } - if _, err := k.c.RunCmd(exec.Command("sudo", "mv", conf+".new", conf)); err != nil { - return errors.Wrap(err, "mv") + if _, err := k.c.RunCmd(exec.Command("sudo", "cp", conf+".new", conf)); err != nil { + return errors.Wrap(err, "cp") } baseCmd := fmt.Sprintf("%s %s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), phase) @@ -425,9 +450,9 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { glog.Infof("resetting cluster from %s", conf) // Run commands one at a time so that it is easier to root cause failures. for _, c := range cmds { - rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", c)) + _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", c)) if err != nil { - return errors.Wrapf(err, "running cmd: %s", rr.Command()) + return errors.Wrap(err, "run") } } @@ -504,11 +529,32 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error { cmd = fmt.Sprintf("%s reset", bsutil.InvokeKubeadm(k8s.KubernetesVersion)) } - if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd)); err != nil { - return errors.Wrapf(err, "kubeadm reset: cmd: %q", rr.Command()) + rr, derr := k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd)) + if derr != nil { + glog.Warningf("%s: %v", rr.Command(), err) } - return nil + if err := kubelet.ForceStop(k.c); err != nil { + glog.Warningf("stop kubelet: %v", err) + } + + cr, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: k.c, Socket: k8s.CRISocket}) + if err != nil { + return errors.Wrap(err, "runtime") + } + + containers, err := cr.ListContainers(cruntime.ListOptions{Namespaces: []string{"kube-system"}}) + if err != nil { + glog.Warningf("unable to list kube-system containers: %v", err) + } + if len(containers) > 0 { + glog.Warningf("found %d 
kube-system containers to stop", len(containers)) + if err := cr.StopContainers(containers); err != nil { + glog.Warningf("error stopping containers: %v", err) + } + } + + return derr } // SetupCerts sets up certificates within the cluster. @@ -619,7 +665,7 @@ func reloadKubelet(runner command.Runner) error { return nil } - startCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo mv %s.new %s && sudo mv %s.new %s && sudo systemctl daemon-reload && sudo systemctl restart kubelet", svc, svc, conf, conf)) + startCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo cp %s.new %s && sudo cp %s.new %s && sudo systemctl daemon-reload && sudo systemctl restart kubelet", svc, svc, conf, conf)) if _, err := runner.RunCmd(startCmd); err != nil { return errors.Wrap(err, "starting kubelet") } From d18cb6fe09ac2577749eaeb534e09f4c76281c7d Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 13:26:47 -0700 Subject: [PATCH 247/668] install socat --- .github/workflows/main.yml | 6 ++++-- hack/jenkins/linux_integration_tests_none.sh | 6 ++++++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 79a9f0982e..44df941563 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -233,7 +233,8 @@ jobs: runs-on: ubuntu-16.04 steps: # conntrack is required for kubernetes 1.18 and higher - - name: Install conntrack + # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon + - name: Install tools for none shell: bash run: | sudo apt-get update -qq @@ -303,7 +304,8 @@ jobs: runs-on: ubuntu-18.04 steps: # conntrack is required for kubernetes 1.18 and higher - - name: Install conntrack + # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon + - name: Install tools for none shell: bash run: | sudo apt-get update -qq diff --git a/hack/jenkins/linux_integration_tests_none.sh 
b/hack/jenkins/linux_integration_tests_none.sh index 9f45f2ad54..d6c99722c6 100755 --- a/hack/jenkins/linux_integration_tests_none.sh +++ b/hack/jenkins/linux_integration_tests_none.sh @@ -58,6 +58,12 @@ if ! conntrack --version &>/dev/null; then sudo apt-get -qq -y install conntrack fi + # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon +if ! which socat &>/dev/null; then + echo "WARNING: No socat is not installed" + sudo apt-get update -qq + sudo apt-get -qq -y install socat +fi mkdir -p cron && gsutil -m rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES" sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP" From 23b4ec9df342b16a35fb92ea79b2c14f51bfe793 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 13:28:45 -0700 Subject: [PATCH 248/668] install socat gh actions --- .github/workflows/main.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 44df941563..4a0bd5b0aa 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -239,6 +239,7 @@ jobs: run: | sudo apt-get update -qq sudo apt-get -qq -y install conntrack + sudo apt-get -qq -y install socat - name: Install gopogh shell: bash run: | @@ -310,6 +311,7 @@ jobs: run: | sudo apt-get update -qq sudo apt-get -qq -y install conntrack + sudo apt-get -qq -y install socat - name: Install gopogh shell: bash run: | From 093b7c29be4272cfaaa2781c1e445dfba6088bb8 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 13:30:08 -0700 Subject: [PATCH 249/668] improve comments --- hack/jenkins/linux_integration_tests_none.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/jenkins/linux_integration_tests_none.sh b/hack/jenkins/linux_integration_tests_none.sh index d6c99722c6..0549fde508 100755 --- 
a/hack/jenkins/linux_integration_tests_none.sh +++ b/hack/jenkins/linux_integration_tests_none.sh @@ -53,14 +53,14 @@ sudo systemctl is-active --quiet kubelet \ # conntrack is required for kubernetes 1.18 and higher for none driver if ! conntrack --version &>/dev/null; then - echo "WARNING: No contrack is not installed" + echo "WARNING: contrack is not installed. will try to install." sudo apt-get update -qq sudo apt-get -qq -y install conntrack fi # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon if ! which socat &>/dev/null; then - echo "WARNING: No socat is not installed" + echo "WARNING: socat is not installed. will try to install." sudo apt-get update -qq sudo apt-get -qq -y install socat fi From ea9aa72984bddfbf29b3a4573c0eed483dc62c0f Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 25 Mar 2020 13:44:28 -0700 Subject: [PATCH 250/668] Retry addon application --- go.sum | 2 -- pkg/addons/addons.go | 17 +++++++++++------ 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/go.sum b/go.sum index d7d0bc3a61..15a7b48dae 100644 --- a/go.sum +++ b/go.sum @@ -421,8 +421,6 @@ github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= -github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345 h1:XP1VL9iOZu4yz/rq8zj+yvB23XEY5erXRzp8JYmkWu0= -github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU= github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f h1:tL0xH80QVHQOde6Qqdohv6PewABH8l8N9pywZtuojJ0= 
github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= diff --git a/pkg/addons/addons.go b/pkg/addons/addons.go index d644aafc88..a3b1600a3d 100644 --- a/pkg/addons/addons.go +++ b/pkg/addons/addons.go @@ -34,6 +34,7 @@ import ( "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/storageclass" + "k8s.io/minikube/pkg/util/retry" ) // defaultStorageClassProvisioner is the name of the default storage class provisioner @@ -211,13 +212,17 @@ func enableOrDisableAddonInternal(cc *config.ClusterConfig, addon *assets.Addon, } command := kubectlCommand(cc, deployFiles, enable) - glog.Infof("Running: %v", command) - rr, err := cmd.RunCmd(command) - if err != nil { - return errors.Wrapf(err, "addon apply") + + // Retry, because sometimes we race against an apiserver restart + apply := func() error { + _, err := cmd.RunCmd(command) + if err != nil { + glog.Warningf("apply failed, will retry: %v", err) + } + return err } - glog.Infof("output:\n%s", rr.Output()) - return nil + + return retry.Expo(apply, 1*time.Second, time.Second*30) } // enableOrDisableStorageClasses enables or disables storage classes From c3fba065a9e97d48e616885f405e9ab15dbbfcee Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 25 Mar 2020 14:07:39 -0700 Subject: [PATCH 251/668] do not override hostname if extraConfig is specified --- pkg/minikube/bootstrapper/bsutil/kubelet.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet.go b/pkg/minikube/bootstrapper/bsutil/kubelet.go index c2180838a3..4068deac21 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet.go @@ -62,7 +62,7 @@ func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manage extraOpts["node-ip"] = 
cp.IP } nodeName := KubeNodeName(mc, nc) - if nodeName != "" { + if _, ok := extraOpts["hostname-override"]; !ok { extraOpts["hostname-override"] = nodeName } From 6417e85f5b1d78865372230fba6b21f2fac2ac8e Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 25 Mar 2020 14:40:40 -0700 Subject: [PATCH 252/668] fix ordering --- pkg/minikube/bootstrapper/bsutil/kubelet.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet.go b/pkg/minikube/bootstrapper/bsutil/kubelet.go index 4068deac21..1ed22d17c6 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet.go @@ -61,8 +61,8 @@ func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manage if _, ok := extraOpts["node-ip"]; !ok { extraOpts["node-ip"] = cp.IP } - nodeName := KubeNodeName(mc, nc) if _, ok := extraOpts["hostname-override"]; !ok { + nodeName := KubeNodeName(mc, nc) extraOpts["hostname-override"] = nodeName } From f84569aab05a7a7d5e2a929c1045d5a3a9e425fd Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 25 Mar 2020 14:46:44 -0700 Subject: [PATCH 253/668] Make crazy cluster_test / mock_driver combination pass tests --- cmd/minikube/cmd/stop.go | 7 ++++--- pkg/minikube/constants/constants.go | 4 ++++ pkg/minikube/machine/cluster_test.go | 7 ++++++- pkg/minikube/machine/delete.go | 6 ++---- pkg/minikube/machine/fix.go | 26 +++++++++++++------------- pkg/minikube/machine/stop.go | 11 +++++++++-- pkg/minikube/tests/driver_mock.go | 16 +++++++++++----- 7 files changed, 49 insertions(+), 28 deletions(-) diff --git a/cmd/minikube/cmd/stop.go b/cmd/minikube/cmd/stop.go index 6d79b0446f..1036a5d365 100644 --- a/cmd/minikube/cmd/stop.go +++ b/cmd/minikube/cmd/stop.go @@ -69,8 +69,9 @@ func runStop(cmd *cobra.Command, args []string) { func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool { nonexistent := false - stop := func() (err error) { - machineName := 
driver.MachineName(cluster, n) + machineName := driver.MachineName(cluster, n) + + tryStop := func() (err error) { err = machine.StopHost(api, machineName) if err == nil { return nil @@ -87,7 +88,7 @@ func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool } } - if err := retry.Expo(stop, 5*time.Second, 3*time.Minute, 5); err != nil { + if err := retry.Expo(tryStop, 1*time.Second, 30*time.Second, 3); err != nil { exit.WithError("Unable to stop VM", err) } diff --git a/pkg/minikube/constants/constants.go b/pkg/minikube/constants/constants.go index 33ae576f48..af053eee32 100644 --- a/pkg/minikube/constants/constants.go +++ b/pkg/minikube/constants/constants.go @@ -17,6 +17,7 @@ limitations under the License. package constants import ( + "errors" "path/filepath" "k8s.io/client-go/tools/clientcmd" @@ -100,4 +101,7 @@ var ( "storage-gluster", "istio-operator", } + + // ErrMachineMissing is returned when virtual machine does not exist due to user interrupt cancel(i.e. Ctrl + C) + ErrMachineMissing = errors.New("machine does not exist") ) diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index 0d0c16c64b..6d687d3825 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -17,11 +17,13 @@ limitations under the License. package machine import ( + "flag" "fmt" "testing" "time" // Driver used by testdata + "k8s.io/minikube/pkg/minikube/constants" _ "k8s.io/minikube/pkg/minikube/registry/drvs/virtualbox" "github.com/docker/machine/libmachine/drivers" @@ -41,6 +43,9 @@ func createMockDriverHost(c config.ClusterConfig, n config.Node) (interface{}, e } func RegisterMockDriver(t *testing.T) { + // Debugging this test is a nightmare. 
+ flag.Lookup("logtostderr").Value.Set("true") + t.Helper() if !registry.Driver(driver.Mock).Empty() { return @@ -163,7 +168,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) { // This should pass with creating host, while machine does not exist. h, _, err = StartHost(api, mc, n) if err != nil { - if err != ErrorMachineNotExist { + if err != constants.ErrMachineMissing { t.Fatalf("Error starting host: %v", err) } } diff --git a/pkg/minikube/machine/delete.go b/pkg/minikube/machine/delete.go index 69b493ac26..2132d16737 100644 --- a/pkg/minikube/machine/delete.go +++ b/pkg/minikube/machine/delete.go @@ -94,7 +94,7 @@ func DeleteHost(api libmachine.API, machineName string) error { func delete(api libmachine.API, h *host.Host, machineName string) error { if err := h.Driver.Remove(); err != nil { glog.Warningf("remove failed, will retry: %v", err) - time.Sleep(2 * time.Second) + time.Sleep(1 * time.Second) nerr := h.Driver.Remove() if nerr != nil { @@ -111,9 +111,7 @@ func delete(api libmachine.API, h *host.Host, machineName string) error { // demolish destroys a host by any means necessary - use only if state is inconsistent func demolish(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) { machineName := driver.MachineName(cc, n) - glog.Infof("destroying %s ...", machineName) - - // First try using the friendly API's. + glog.Infof("DEMOLISHING %s ...", machineName) // This will probably fail err := stop(h) diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index cc8e8e8271..dd1f927a65 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -47,11 +47,6 @@ const ( maxClockDesyncSeconds = 2.1 ) -var ( - // ErrorMachineNotExist is returned when virtual machine does not exist due to user interrupt cancel(i.e. 
Ctrl + C) - ErrorMachineNotExist = errors.New("machine does not exist") -) - // fixHost fixes up a previously configured VM so that it is ready to run Kubernetes func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, error) { start := time.Now() @@ -109,21 +104,26 @@ func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node // If virtual machine does not exist due to user interrupt cancel(i.e. Ctrl + C), recreate virtual machine me, err := machineExists(h.Driver.DriverName(), s, serr) glog.Infof("exists: %v err=%v", me, err) + glog.Infof("%q vs %q", err, constants.ErrMachineMissing) - if !me || err == ErrorMachineNotExist { + if !me || err == constants.ErrMachineMissing { out.T(out.Shrug, `{{.driver_name}} "{{.cluster}}" {{.machine_type}} is missing, will recreate.`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) demolish(api, cc, n, h) + + glog.Infof("Sleeping 1 second for extra luck!") time.Sleep(1 * time.Second) + h, err = createHost(api, cc, n) if err != nil { return nil, errors.Wrap(err, "recreate") } + recreated = true s, serr = h.Driver.GetState() } } - if serr != ErrorMachineNotExist { + if serr != constants.ErrMachineMissing { glog.Warningf("unexpected machine state, will restart: %v", serr) } @@ -219,7 +219,7 @@ func adjustGuestClock(h hostRunner, t time.Time) error { func machineExistsState(s state.State, err error) (bool, error) { if s == state.None { - return false, ErrorMachineNotExist + return false, constants.ErrMachineMissing } return true, err } @@ -228,7 +228,7 @@ func machineExistsError(s state.State, err error, drverr error) (bool, error) { _ = s // not used if err == drverr { // if the error matches driver error - return false, ErrorMachineNotExist + return false, constants.ErrMachineMissing } return true, err } @@ -236,7 +236,7 @@ func machineExistsError(s state.State, err error, drverr error) (bool, error) { func machineExistsMessage(s state.State, err 
error, msg string) (bool, error) { if s == state.None || (err != nil && err.Error() == msg) { // if the error contains the message - return false, ErrorMachineNotExist + return false, constants.ErrMachineMissing } return true, err } @@ -244,10 +244,10 @@ func machineExistsMessage(s state.State, err error, msg string) (bool, error) { func machineExistsDocker(s state.State, err error) (bool, error) { if s == state.Error { // if the kic image is not present on the host machine, when user cancel `minikube start`, state.Error will be return - return false, ErrorMachineNotExist + return false, constants.ErrMachineMissing } else if s == state.None { // if the kic image is present on the host machine, when user cancel `minikube start`, state.None will be return - return false, ErrorMachineNotExist + return false, constants.ErrMachineMissing } return true, err } @@ -279,7 +279,7 @@ func machineExists(d string, s state.State, err error) (bool, error) { return machineExistsDocker(s, err) case driver.Mock: if s == state.Error { - return false, ErrorMachineNotExist + return false, constants.ErrMachineMissing } return true, err default: diff --git a/pkg/minikube/machine/stop.go b/pkg/minikube/machine/stop.go index 5c4ba4e671..fafe09e446 100644 --- a/pkg/minikube/machine/stop.go +++ b/pkg/minikube/machine/stop.go @@ -17,6 +17,8 @@ limitations under the License. package machine import ( + "time" + "github.com/docker/machine/libmachine" "github.com/docker/machine/libmachine/host" "github.com/docker/machine/libmachine/mcnerror" @@ -30,6 +32,7 @@ import ( // StopHost stops the host VM, saving state to disk. 
func StopHost(api libmachine.API, machineName string) error { + glog.Infof("StopHost: %v", machineName) h, err := api.Load(machineName) if err != nil { return errors.Wrapf(err, "load") @@ -41,6 +44,7 @@ func StopHost(api libmachine.API, machineName string) error { // stop forcibly stops a host without needing to load func stop(h *host.Host) error { + start := time.Now() if h.DriverName == driver.HyperV { glog.Infof("As there are issues with stopping Hyper-V VMs using API, trying to shut down using SSH") if err := trySSHPowerOff(h); err != nil { @@ -49,12 +53,15 @@ func stop(h *host.Host) error { } if err := h.Stop(); err != nil { - alreadyInStateError, ok := err.(mcnerror.ErrHostAlreadyInState) - if ok && alreadyInStateError.State == state.Stopped { + glog.Infof("stop err: %v", err) + st, ok := err.(mcnerror.ErrHostAlreadyInState) + if ok && st.State == state.Stopped { + glog.Infof("host is already stopped") return nil } return &retry.RetriableError{Err: errors.Wrap(err, "stop")} } + glog.Infof("stop complete within %s", time.Since(start)) return nil } diff --git a/pkg/minikube/tests/driver_mock.go b/pkg/minikube/tests/driver_mock.go index 14d5b2f59d..2b9dff6ad0 100644 --- a/pkg/minikube/tests/driver_mock.go +++ b/pkg/minikube/tests/driver_mock.go @@ -17,6 +17,7 @@ limitations under the License. 
package tests import ( + "runtime" "testing" "github.com/docker/machine/libmachine/drivers" @@ -24,6 +25,7 @@ import ( "github.com/docker/machine/libmachine/state" "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube/constants" ) // MockDriver is a struct used to mock out libmachine.Driver @@ -96,11 +98,14 @@ func (d *MockDriver) GetSSHKeyPath() string { // GetState returns the state of the driver func (d *MockDriver) GetState() (state.State, error) { - d.Logf("MockDriver.GetState: %v", d.CurrentState) - if d.NotExistError { + _, file, no, _ := runtime.Caller(2) + d.Logf("MockDriver.GetState called from %s#%d: returning %q", file, no, d.CurrentState) + + // NOTE: this logic is questionable + if d.NotExistError && d.CurrentState != state.Stopped && d.CurrentState != state.None { d.CurrentState = state.Error - // don't use cluster.ErrorMachineNotExist to avoid import cycle - return d.CurrentState, errors.New("machine does not exist") + d.Logf("mock NotExistError set, setting state=%s err=%v", d.CurrentState, constants.ErrMachineMissing) + return d.CurrentState, constants.ErrMachineMissing } return d.CurrentState, nil } @@ -123,12 +128,13 @@ func (d *MockDriver) Remove() error { if d.RemoveError { return errors.New("error deleting machine") } + d.NotExistError = false return nil } // Restart restarts the machine func (d *MockDriver) Restart() error { - d.Logf("MockDriver.Restart") + d.Logf("MockDriver.Restart, setting CurrentState=%s", state.Running) d.CurrentState = state.Running return nil } From 1ee125e68852ab592a9e7a26c954feafc59853e3 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 25 Mar 2020 14:50:13 -0700 Subject: [PATCH 254/668] lint err --- go.sum | 2 -- pkg/minikube/machine/cluster_test.go | 4 +++- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.sum b/go.sum index d7d0bc3a61..15a7b48dae 100644 --- a/go.sum +++ b/go.sum @@ -421,8 +421,6 @@ github.com/jmespath/go-jmespath 
v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= -github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345 h1:XP1VL9iOZu4yz/rq8zj+yvB23XEY5erXRzp8JYmkWu0= -github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU= github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f h1:tL0xH80QVHQOde6Qqdohv6PewABH8l8N9pywZtuojJ0= github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index 6d687d3825..01c99de92e 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -44,7 +44,9 @@ func createMockDriverHost(c config.ClusterConfig, n config.Node) (interface{}, e func RegisterMockDriver(t *testing.T) { // Debugging this test is a nightmare. 
- flag.Lookup("logtostderr").Value.Set("true") + if err := flag.Lookup("logtostderr").Value.Set("true"); err != nil { + t.Logf("unable to set logtostderr: %v", err) + } t.Helper() if !registry.Driver(driver.Mock).Empty() { From 9597fcc1342414b9150faa0d9a971b6c06b1e41b Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 25 Mar 2020 14:52:51 -0700 Subject: [PATCH 255/668] validateMinikubeKubectl: Add --context to kubectl call to pass profile name --- test/integration/functional_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index 5111590fe6..ba0af809d2 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -233,7 +233,7 @@ func validateKubectlGetPods(ctx context.Context, t *testing.T, profile string) { // validateMinikubeKubectl validates that the `minikube kubectl` command returns content func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string) { - kubectlArgs := []string{"kubectl", "--", "get", "pods"} + kubectlArgs := []string{"kubectl", "--", "--context", profile, "get", "pods"} rr, err := Run(t, exec.CommandContext(ctx, Target(), kubectlArgs...)) if err != nil { t.Fatalf("%s failed: %v", rr.Args, err) From 9089296f2ee805341abd8d2468ec51838fa19315 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 25 Mar 2020 14:57:27 -0700 Subject: [PATCH 256/668] validateMinikubeKubectl: Add -p to minikube call to get Kubernetes version --- test/integration/functional_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index ba0af809d2..63b2a28c9c 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -233,7 +233,8 @@ func validateKubectlGetPods(ctx context.Context, t *testing.T, profile string) { // validateMinikubeKubectl validates that the `minikube kubectl` command 
returns content func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string) { - kubectlArgs := []string{"kubectl", "--", "--context", profile, "get", "pods"} + // Must set the profile so that it knows what version of Kubernetes to use + kubectlArgs := []string{"-p", profile, "kubectl", "--", "--context", profile, "get", "pods"} rr, err := Run(t, exec.CommandContext(ctx, Target(), kubectlArgs...)) if err != nil { t.Fatalf("%s failed: %v", rr.Args, err) From fd1897ddaecaf7fe9b1e508a620e699e2a45e090 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 25 Mar 2020 15:01:57 -0700 Subject: [PATCH 257/668] replace emoji with spacing issues --- pkg/minikube/out/style.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/minikube/out/style.go b/pkg/minikube/out/style.go index 150a7af30d..bcae27ec87 100644 --- a/pkg/minikube/out/style.go +++ b/pkg/minikube/out/style.go @@ -92,7 +92,7 @@ var styles = map[StyleEnum]style{ Caching: {Prefix: "🤹 "}, StartingVM: {Prefix: "🔥 "}, StartingNone: {Prefix: "🤹 "}, - Provisioner: {Prefix: "ℹ️ "}, + Provisioner: {Prefix: "ℹ️ "}, Resetting: {Prefix: "🔄 "}, DeletingHost: {Prefix: "🔥 "}, Copying: {Prefix: "✨ "}, @@ -117,7 +117,7 @@ var styles = map[StyleEnum]style{ Unmount: {Prefix: "🔥 "}, MountOptions: {Prefix: "💾 "}, Fileserver: {Prefix: "🚀 ", OmitNewline: true}, - DryRun: {Prefix: "🏜️ "}, + DryRun: {Prefix: "🌵 "}, AddonEnable: {Prefix: "🌟 "}, AddonDisable: {Prefix: "🌑 "}, } From f52df44fd5dcd5eaadfe709b249dc4d12ea26eca Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Wed, 25 Mar 2020 16:43:35 -0700 Subject: [PATCH 258/668] If preload fails on hot restart, log warning instead of erroring out --- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 5de297a003..6091024d60 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ 
b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -531,7 +531,7 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { } if err := r.Preload(cfg.KubernetesConfig); err != nil { - return errors.Wrap(err, "preloading") + glog.Infof("prelaoding failed, will try to load cached images: %v", err) } if cfg.KubernetesConfig.ShouldLoadCachedImages { From dc3b842d4164dd96d4c9192833c22b769bd3f2f8 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 25 Mar 2020 17:11:22 -0700 Subject: [PATCH 259/668] Avoid provision.Detector when possible --- pkg/minikube/machine/fix.go | 23 +++++++++++++---------- pkg/minikube/machine/info.go | 22 +++++++++++----------- pkg/minikube/machine/start.go | 2 +- 3 files changed, 25 insertions(+), 22 deletions(-) diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 9575f92294..05cbb8eec5 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -67,21 +67,24 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host. return h, errors.Wrap(err, "Error loading existing host. Please try running [minikube delete], then run [minikube start] again.") } + driverName := h.Driver.DriverName() + // check if need to re-run docker-env - maybeWarnAboutEvalEnv(cc.Driver, cc.Name) + maybeWarnAboutEvalEnv(driverName, cc.Name) h, err = recreateIfNeeded(api, cc, n, h) if err != nil { return h, err } - // Technically, we should only have to call provision if Docker has changed, - // but who can predict what shape the existing VM is in. 
- e := engineOptions(cc) - h.HostOptions.EngineOptions.Env = e.Env - err = provisionDockerMachine(h) - if err != nil { - return h, errors.Wrap(err, "provision") + if !driver.BareMetal(h.Driver.DriverName()) { + glog.Infof("%s is local, skipping re-provision as it requires SSH", driverName) + e := engineOptions(cc) + h.HostOptions.EngineOptions.Env = e.Env + err = provisionDockerMachine(h) + if err != nil { + return h, errors.Wrap(err, "provision") + } } if driver.IsMock(h.DriverName) { @@ -93,11 +96,11 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host. } if driver.BareMetal(h.Driver.DriverName()) { - glog.Infof("%s is local, skipping auth/time setup (requires ssh)", h.Driver.DriverName()) + glog.Infof("%s is local, skipping auth/time setup (requires ssh)", driverName) return h, nil } - return h, ensureSyncedGuestClock(h, cc.Driver) + return h, ensureSyncedGuestClock(h, driverName) } func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) (*host.Host, error) { diff --git a/pkg/minikube/machine/info.go b/pkg/minikube/machine/info.go index 1bae7253e9..c3b4e06569 100644 --- a/pkg/minikube/machine/info.go +++ b/pkg/minikube/machine/info.go @@ -18,13 +18,14 @@ package machine import ( "io/ioutil" + "os/exec" - "github.com/docker/machine/libmachine/drivers" "github.com/docker/machine/libmachine/provision" "github.com/golang/glog" "github.com/shirou/gopsutil/cpu" "github.com/shirou/gopsutil/disk" "github.com/shirou/gopsutil/mem" + "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/out" ) @@ -80,18 +81,17 @@ func showLocalOsRelease() { } // logRemoteOsRelease shows systemd information about the current linux distribution, on the remote VM -func logRemoteOsRelease(drv drivers.Driver) { - provisioner, err := provision.DetectProvisioner(drv) +func logRemoteOsRelease(r command.Runner) { + rr, err := r.RunCmd(exec.Command("cat", "/etc/os-release")) if err != nil { - 
glog.Errorf("DetectProvisioner: %v", err) + glog.Infof("remote release failed: %v", err) + } + + osReleaseInfo, err := provision.NewOsRelease(rr.Stdout.Bytes()) + if err != nil { + glog.Errorf("NewOsRelease: %v", err) return } - osReleaseInfo, err := provisioner.GetOsReleaseInfo() - if err != nil { - glog.Errorf("GetOsReleaseInfo: %v", err) - return - } - - glog.Infof("Provisioned with %s", osReleaseInfo.PrettyName) + glog.Infof("Remote host: %s", osReleaseInfo.PrettyName) } diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go index c41b285a2f..e42fc9cf62 100644 --- a/pkg/minikube/machine/start.go +++ b/pkg/minikube/machine/start.go @@ -212,7 +212,7 @@ func postStartSetup(h *host.Host, mc config.ClusterConfig) error { showLocalOsRelease() } if driver.IsVM(mc.Driver) { - logRemoteOsRelease(h.Driver) + logRemoteOsRelease(r) } return syncLocalAssets(r) } From cc0262ef55cbae4e0403943c7ae6b0068be2cb80 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 19:58:38 -0700 Subject: [PATCH 260/668] improve logging and fix some not logged errors --- test/integration/aaa_download_only_test.go | 18 +++--- test/integration/addons_test.go | 68 +++++++++++----------- test/integration/functional_test.go | 36 ++++++------ test/integration/version_upgrade_test.go | 4 +- 4 files changed, 63 insertions(+), 63 deletions(-) diff --git a/test/integration/aaa_download_only_test.go b/test/integration/aaa_download_only_test.go index 8be373a5dd..cf6239cd9c 100644 --- a/test/integration/aaa_download_only_test.go +++ b/test/integration/aaa_download_only_test.go @@ -67,7 +67,7 @@ func TestDownloadOnly(t *testing.T) { } if err != nil { - t.Errorf("%s failed: %v", args, err) + t.Errorf("failed to download only. args: %q %v", args, err) } // skip for none, as none driver does not have preload feature. 
@@ -75,14 +75,14 @@ func TestDownloadOnly(t *testing.T) { if download.PreloadExists(v, r) { // Just make sure the tarball path exists if _, err := os.Stat(download.TarballPath(v)); err != nil { - t.Errorf("preloaded tarball path doesn't exist: %v", err) + t.Errorf("failed to verify preloaded tarball file exists: %v", err) } return } } imgs, err := images.Kubeadm("", v) if err != nil { - t.Errorf("kubeadm images: %v %+v", v, err) + t.Errorf("failed to get kubeadm images for %v: %+v", v, err) } // skip verify for cache images if --driver=none @@ -129,7 +129,7 @@ func TestDownloadOnly(t *testing.T) { } rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "--all")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to delete all. args: %q : %v", rr.Args, err) } }) // Delete should always succeed, even if previously partially or fully deleted. @@ -139,7 +139,7 @@ func TestDownloadOnly(t *testing.T) { } rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to delete. args: %q: %v", rr.Args, err) } }) }) @@ -158,22 +158,22 @@ func TestDownloadOnlyKic(t *testing.T) { args = append(args, StartArgs()...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%s failed: %v:\n%s", args, err, rr.Output()) + t.Errorf("start with download only failed %q : %v:\n%s", args, err) } // Make sure the downloaded image tarball exists tarball := download.TarballPath(constants.DefaultKubernetesVersion) contents, err := ioutil.ReadFile(tarball) if err != nil { - t.Errorf("reading tarball: %v", err) + t.Errorf("failed to read tarball file %q: %v", tarball, err) } // Make sure it has the correct checksum checksum := md5.Sum(contents) remoteChecksum, err := ioutil.ReadFile(download.PreloadChecksumPath(constants.DefaultKubernetesVersion)) if err != nil { - t.Errorf("reading checksum file: %v", err) + t.Errorf("failed to read checksum file %q : %v", download.PreloadChecksumPath(constants.DefaultKubernetesVersion), err) } if string(remoteChecksum) != string(checksum[:]) { - t.Errorf("checksum of %s does not match remote checksum (%s != %s)", tarball, string(remoteChecksum), string(checksum[:])) + t.Errorf("failed to verify checksum. checksum of %q does not match remote checksum (%q != %q)", tarball, string(remoteChecksum), string(checksum[:])) } } diff --git a/test/integration/addons_test.go b/test/integration/addons_test.go index 3e001cd072..43ad112651 100644 --- a/test/integration/addons_test.go +++ b/test/integration/addons_test.go @@ -69,15 +69,15 @@ func TestAddons(t *testing.T) { // Assert that disable/enable works offline rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to stop minikube. 
args %q : %v", rr.Args, err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to enable dashboard addon: args %q : %v", rr.Args, err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "disable", "dashboard", "-p", profile)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to disable dashboard addon: args %q : %v", rr.Args, err) } } @@ -88,30 +88,30 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) { client, err := kapi.Client(profile) if err != nil { - t.Fatalf("kubernetes client: %v", client) + t.Fatalf("failed to get kubernetes client: %v", client) } if err := kapi.WaitForDeploymentToStabilize(client, "kube-system", "nginx-ingress-controller", Minutes(6)); err != nil { - t.Errorf("waiting for ingress-controller deployment to stabilize: %v", err) + t.Errorf("failed waiting for ingress-controller deployment to stabilize: %v", err) } if _, err := PodWait(ctx, t, profile, "kube-system", "app.kubernetes.io/name=nginx-ingress-controller", Minutes(12)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waititing for nginx-ingress-controller : %v", err) } rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-ing.yaml"))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to kubectl replace nginx-ing. args %q. %v", rr.Args, err) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-pod-svc.yaml"))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to kubectl replace nginx-pod-svc. args %q. 
%v", rr.Args, err) } if _, err := PodWait(ctx, t, profile, "default", "run=nginx", Minutes(4)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for ngnix pod: %v", err) } if err := kapi.WaitForService(client, "default", "nginx", true, time.Millisecond*500, Minutes(10)); err != nil { - t.Errorf("Error waiting for nginx service to be up") + t.Errorf("failed waiting for nginx service to be up: %v", err) } want := "Welcome to nginx!" @@ -121,7 +121,7 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) { return err } if rr.Stderr.String() != "" { - t.Logf("%v: unexpected stderr: %s", rr.Args, rr.Stderr) + t.Logf("%v: unexpected stderr: %s (may be temproary)", rr.Args, rr.Stderr) } if !strings.Contains(rr.Stdout.String(), want) { return fmt.Errorf("%v stdout = %q, want %q", rr.Args, rr.Stdout, want) @@ -130,32 +130,32 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) { } if err := retry.Expo(checkIngress, 500*time.Millisecond, Seconds(90)); err != nil { - t.Errorf("ingress never responded as expected on 127.0.0.1:80: %v", err) + t.Errorf("failed to get response from ngninx ingress on 127.0.0.1:80: %v", err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "ingress", "--alsologtostderr", "-v=1")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to disable ingress addon. 
args %q : %v", rr.Args, err) } } func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) { client, err := kapi.Client(profile) if err != nil { - t.Fatalf("kubernetes client: %v", client) + t.Fatalf("failed to get kubernetes client for %s : %v", profile, err) } start := time.Now() if err := kapi.WaitForRCToStabilize(client, "kube-system", "registry", Minutes(6)); err != nil { - t.Errorf("waiting for registry replicacontroller to stabilize: %v", err) + t.Errorf("failed waiting for registry replicacontroller to stabilize: %v", err) } t.Logf("registry stabilized in %s", time.Since(start)) if _, err := PodWait(ctx, t, profile, "kube-system", "actual-registry=true", Minutes(6)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for pod actual-registry: %v", err) } if _, err := PodWait(ctx, t, profile, "kube-system", "registry-proxy=true", Minutes(10)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for pod registry-proxy: %v", err) } // Test from inside the cluster (no curl available on busybox) @@ -166,20 +166,20 @@ func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) { rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "run", "--rm", "registry-test", "--restart=Never", "--image=busybox", "-it", "--", "sh", "-c", "wget --spider -S http://registry.kube-system.svc.cluster.local")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to hit registry.kube-system.svc.cluster.local. args %q failed: %v", rr.Args, err) } want := "HTTP/1.1 200" if !strings.Contains(rr.Stdout.String(), want) { - t.Errorf("curl = %q, want *%s*", rr.Stdout.String(), want) + t.Errorf("expected curl response be %q, but got *%s*", want, rr.Stdout.String()) } // Test from outside the cluster rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ip")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed run minikube ip. 
args %q : %v", rr.Args, err) } if rr.Stderr.String() != "" { - t.Errorf("%s: unexpected stderr: %s", rr.Args, rr.Stderr) + t.Errorf("expected stderr to be -empty- but got: *%q*", rr.Args, rr.Stderr) } endpoint := fmt.Sprintf("http://%s:%d", strings.TrimSpace(rr.Stdout.String()), 5000) @@ -199,30 +199,30 @@ func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) { return nil } - if err := retry.Expo(checkExternalAccess, 500*time.Millisecond, Minutes(2)); err != nil { - t.Errorf(err.Error()) + if err := retry.Expo(checkExternalAccess, 500*time.Millisecond, Seconds(150)); err != nil { + t.Errorf("failed to check external access to %s: %v", u.String(), err.Error()) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "registry", "--alsologtostderr", "-v=1")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to disable registry addon. args %q: %v", rr.Args, err) } } func validateMetricsServerAddon(ctx context.Context, t *testing.T, profile string) { client, err := kapi.Client(profile) if err != nil { - t.Fatalf("kubernetes client: %v", client) + t.Fatalf("failed to get kubernetes client for %s: %v", profile, err) } start := time.Now() if err := kapi.WaitForDeploymentToStabilize(client, "kube-system", "metrics-server", Minutes(6)); err != nil { - t.Errorf("waiting for metrics-server deployment to stabilize: %v", err) + t.Errorf("failed waiting for metrics-server deployment to stabilize: %v", err) } t.Logf("metrics-server stabilized in %s", time.Since(start)) if _, err := PodWait(ctx, t, profile, "kube-system", "k8s-app=metrics-server", Minutes(6)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for k8s-app=metrics-server pod: %v", err) } want := "CPU(cores)" @@ -242,29 +242,29 @@ func validateMetricsServerAddon(ctx context.Context, t *testing.T, profile strin // metrics-server takes some time to be able to collect metrics if err := retry.Expo(checkMetricsServer, 
time.Second*3, Minutes(6)); err != nil { - t.Errorf(err.Error()) + t.Errorf("failed checking metric server: %v", err.Error()) } rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "metrics-server", "--alsologtostderr", "-v=1")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to disable metrics-server addon: args %q: %v", rr.Args, err) } } func validateHelmTillerAddon(ctx context.Context, t *testing.T, profile string) { client, err := kapi.Client(profile) if err != nil { - t.Fatalf("kubernetes client: %v", client) + t.Fatalf("failed to get kubernetes client for %s: %v", profile, err) } start := time.Now() if err := kapi.WaitForDeploymentToStabilize(client, "kube-system", "tiller-deploy", Minutes(6)); err != nil { - t.Errorf("waiting for tiller-deploy deployment to stabilize: %v", err) + t.Errorf("failed waiting for tiller-deploy deployment to stabilize: %v", err) } t.Logf("tiller-deploy stabilized in %s", time.Since(start)) if _, err := PodWait(ctx, t, profile, "kube-system", "app=helm", Minutes(6)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for helm pod: %v", err) } if NoneDriver() { @@ -292,11 +292,11 @@ func validateHelmTillerAddon(ctx context.Context, t *testing.T, profile string) } if err := retry.Expo(checkHelmTiller, 500*time.Millisecond, Minutes(2)); err != nil { - t.Errorf(err.Error()) + t.Errorf("failed checking helm tiller: %v", err.Error()) } rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "helm-tiller", "--alsologtostderr", "-v=1")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed disabling helm-tiller addon. 
arg %q.s %v", rr.Args, err) } } diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index 63b2a28c9c..127ec9dd99 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -270,40 +270,40 @@ func validateComponentHealth(ctx context.Context, t *testing.T, profile string) func validateStatusCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%q failed: %v", rr.Args, err) } // Custom format rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-f", "host:{{.Host}},kublet:{{.Kubelet}},apiserver:{{.APIServer}},kubeconfig:{{.Kubeconfig}}")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%q failed: %v", rr.Args, err) } match, _ := regexp.MatchString(`host:([A-z]+),kublet:([A-z]+),apiserver:([A-z]+),kubeconfig:([A-z]+)`, rr.Stdout.String()) if !match { - t.Errorf("%s failed: %v. Output for custom format did not match", rr.Args, err) + t.Errorf("%q failed: %v. Output for custom format did not match", rr.Args, err) } // Json output rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-o", "json")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%q failed: %v", rr.Args, err) } var jsonObject map[string]interface{} err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%q failed: %v", rr.Args, err) } if _, ok := jsonObject["Host"]; !ok { - t.Errorf("%s failed: %v. Missing key %s in json object", rr.Args, err, "Host") + t.Errorf("%q failed: %v. Missing key %s in json object", rr.Args, err, "Host") } if _, ok := jsonObject["Kubelet"]; !ok { - t.Errorf("%s failed: %v. Missing key %s in json object", rr.Args, err, "Kubelet") + t.Errorf("%q failed: %v. 
Missing key %s in json object", rr.Args, err, "Kubelet") } if _, ok := jsonObject["APIServer"]; !ok { - t.Errorf("%s failed: %v. Missing key %s in json object", rr.Args, err, "APIServer") + t.Errorf("%q failed: %v. Missing key %s in json object", rr.Args, err, "APIServer") } if _, ok := jsonObject["Kubeconfig"]; !ok { - t.Errorf("%s failed: %v. Missing key %s in json object", rr.Args, err, "Kubeconfig") + t.Errorf("%q failed: %v. Missing key %s in json object", rr.Args, err, "Kubeconfig") } } @@ -312,7 +312,7 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) { args := []string{"dashboard", "--url", "-p", profile, "--alsologtostderr", "-v=1"} ss, err := Start(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%s failed: %v", args, err) + t.Errorf("%q failed: %v", args, err) } defer func() { ss.Stop(t) @@ -334,7 +334,7 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) { resp, err := retryablehttp.Get(u.String()) if err != nil { - t.Errorf("failed get: %v", err) + t.Errorf("failed to http get %q : %v", u.String(), err) } if resp.StatusCode != http.StatusOK { body, err := ioutil.ReadAll(resp.Body) @@ -349,12 +349,12 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) { func validateDNS(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "busybox.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%q failed: %v", rr.Args, err) } names, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", Minutes(4)) if err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for busybox pod : %v", err) } nslookup := func() error { @@ -364,12 +364,12 @@ func validateDNS(ctx context.Context, t *testing.T, profile string) { // If the coredns process was stable, this retry wouldn't be 
necessary. if err = retry.Expo(nslookup, 1*time.Second, Minutes(1)); err != nil { - t.Errorf("nslookup failing: %v", err) + t.Errorf("failed to do nslookup on kubernetes.default: %v", err) } want := []byte("10.96.0.1") if !bytes.Contains(rr.Stdout.Bytes(), want) { - t.Errorf("nslookup: got=%q, want=*%q*", rr.Stdout.Bytes(), want) + t.Errorf("failed nslookup: got=%q, want=*%q*", rr.Stdout.Bytes(), want) } } @@ -454,17 +454,17 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { // make sure the image is deleted. rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo", "crictl", "inspecti", img)) if err == nil { - t.Errorf("expected the image be deleted and get error but got nil error ! cmd: %q", rr.Command()) + t.Errorf("expected an error. because image should not exist. but got *nil error* ! cmd: %q", rr.Command()) } // minikube cache reload. rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "reload")) if err != nil { - t.Errorf("expected %q to run successfully but got error %v", rr.Command(), err) + t.Errorf("expected %q to run successfully but got error: %v", rr.Command(), err) } // make sure 'cache reload' brought back the manually deleted image. rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo", "crictl", "inspecti", img)) if err != nil { - t.Errorf("expected to get no error for %q but got %v", rr.Command(), err) + t.Errorf("expected %q to run successfully but got error: %v", rr.Command(), err) } }) diff --git a/test/integration/version_upgrade_test.go b/test/integration/version_upgrade_test.go index a03676a335..20d131d5ef 100644 --- a/test/integration/version_upgrade_test.go +++ b/test/integration/version_upgrade_test.go @@ -97,7 +97,7 @@ func TestVersionUpgrade(t *testing.T) { args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) 
rr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to start minikube HEAD with newest k8s version. args: %s : %v", rr.Args, err) } s, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "version", "--output=json")) @@ -127,6 +127,6 @@ func TestVersionUpgrade(t *testing.T) { args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) rr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("start and already started minikube failed. args: %q : %v", rr.Args, err) } } From a30ec2574cd9f6a7f596f3a1017b81275967cb91 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 20:18:05 -0700 Subject: [PATCH 261/668] improve logs for fn_mount_cmd tests' --- test/integration/docker_test.go | 10 +++++----- test/integration/fn_mount_cmd.go | 30 +++++++++++++++--------------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/test/integration/docker_test.go b/test/integration/docker_test.go index 05075fe579..da26300dd5 100644 --- a/test/integration/docker_test.go +++ b/test/integration/docker_test.go @@ -39,27 +39,27 @@ func TestDockerFlags(t *testing.T) { args := append([]string{"start", "-p", profile, "--cache-images=false", "--memory=1800", "--install-addons=false", "--wait=false", "--docker-env=FOO=BAR", "--docker-env=BAZ=BAT", "--docker-opt=debug", "--docker-opt=icc=true", "--alsologtostderr", "-v=5"}, StartArgs()...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to start minikube with args: %q : %v", rr.Args, err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo systemctl show docker --property=Environment --no-pager")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to 'systemctl show docker' inside minikube. args %q: %v", rr.Args, err) } for _, envVar := range []string{"FOO=BAR", "BAZ=BAT"} { if !strings.Contains(rr.Stdout.String(), envVar) { - t.Errorf("env var %s missing: %s.", envVar, rr.Stdout) + t.Errorf("expected env key/value %q to be passed to minikube's docker and be included in: *%q*.", envVar, rr.Stdout) } } rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo systemctl show docker --property=ExecStart --no-pager")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed on the second 'systemctl show docker' inside minikube. args %q: %v", rr.Args, err) } for _, opt := range []string{"--debug", "--icc=true"} { if !strings.Contains(rr.Stdout.String(), opt) { - t.Fatalf("%s = %q, want *%s*", rr.Command(), rr.Stdout, opt) + t.Fatalf("expected %q output to have include *%s* . 
output: %q", rr.Command(), opt, rr.Stdout) } } } diff --git a/test/integration/fn_mount_cmd.go b/test/integration/fn_mount_cmd.go index 8a3a9f68ee..6e292af109 100644 --- a/test/integration/fn_mount_cmd.go +++ b/test/integration/fn_mount_cmd.go @@ -66,10 +66,10 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { defer func() { if t.Failed() { - t.Logf("%s failed, getting debug info...", t.Name()) + t.Logf("%q failed, getting debug info...", t.Name()) rr, err := Run(t, exec.Command(Target(), "-p", profile, "ssh", "mount | grep 9p; ls -la /mount-9p; cat /mount-9p/pod-dates")) if err != nil { - t.Logf("%s: %v", rr.Command(), err) + t.Logf("debugging command %q failed : %v", rr.Command(), err) } else { t.Logf("(debug) %s:\n%s", rr.Command(), rr.Stdout) } @@ -78,7 +78,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { // Cleanup in advance of future tests rr, err := Run(t, exec.Command(Target(), "-p", profile, "ssh", "sudo umount -f /mount-9p")) if err != nil { - t.Logf("%s: %v", rr.Command(), err) + t.Logf("%q: %v", rr.Command(), err) } ss.Stop(t) cancel() @@ -117,7 +117,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { // Assert that we can access the mount without an error. Display for debugging. rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "--", "ls", "-la", guestMount)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed verifying accessing to the mount. 
args %q : %v", rr.Args, err) } t.Logf("guest mount directory contents\n%s", rr.Stdout) @@ -125,7 +125,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { tp := filepath.Join("/mount-9p", testMarker) rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "cat", tp)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to verify the mount contains unique test marked: args %q: %v", rr.Args, err) } if !bytes.Equal(rr.Stdout.Bytes(), wantFromTest) { @@ -136,28 +136,28 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { // Start the "busybox-mount" pod. rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "busybox-mount-test.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to 'kubectl replace' for busybox-mount-test. args %q : %v", rr.Args, err) } if _, err := PodWait(ctx, t, profile, "default", "integration-test=busybox-mount", Minutes(4)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for busybox-mount pod: %v", err) } // Read the file written by pod startup p := filepath.Join(tempDir, createdByPod) got, err := ioutil.ReadFile(p) if err != nil { - t.Errorf("readfile %s: %v", p, err) + t.Errorf("failed to read file created by pod %q: %v", p, err) } wantFromPod := []byte("test\n") if !bytes.Equal(got, wantFromPod) { - t.Errorf("%s = %q, want %q", p, got, wantFromPod) + t.Errorf("the content of the file %q is %q, but want it to be: *%q*", p, got, wantFromPod) } // test that file written from host was read in by the pod via cat /mount-9p/written-by-host; rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "logs", "busybox-mount")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to get kubectl logs for busybox-mount. 
args %q : %v", rr.Args, err) } if !bytes.Equal(rr.Stdout.Bytes(), wantFromTest) { t.Errorf("busybox-mount logs = %q, want %q", rr.Stdout.Bytes(), wantFromTest) @@ -169,27 +169,27 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { // test that file written from host was read in by the pod via cat /mount-9p/fromhost; rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "stat", gp)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to stat the file %q iniside minikube : args %q: %v", gp, rr.Args, err) } if runtime.GOOS == "windows" { if strings.Contains(rr.Stdout.String(), "Access: 1970-01-01") { - t.Errorf("invalid access time: %v", rr.Stdout) + t.Errorf("expected to get valid access time but got: %q", rr.Stdout) } } if strings.Contains(rr.Stdout.String(), "Modify: 1970-01-01") { - t.Errorf("invalid modify time: %v", rr.Stdout) + t.Errorf("expected to get valid modify time but got: %q", rr.Stdout) } } p = filepath.Join(tempDir, createdByTestRemovedByPod) if _, err := os.Stat(p); err == nil { - t.Errorf("expected file %s to be removed", p) + t.Errorf("expected file %q to be removed but exists !", p) } p = filepath.Join(tempDir, createdByPodRemovedByTest) if err := os.Remove(p); err != nil { - t.Errorf("unexpected error removing file %s: %v", p, err) + t.Errorf("failed to remove file %q: %v", p, err) } } From 2ac46e8ba91754446b8a484d4086da7c55f70cd4 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 20:22:02 -0700 Subject: [PATCH 262/668] improve logs for fn_pvc --- test/integration/fn_pvc.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/integration/fn_pvc.go b/test/integration/fn_pvc.go index 785783fd80..c387a1750b 100644 --- a/test/integration/fn_pvc.go +++ b/test/integration/fn_pvc.go @@ -38,7 +38,7 @@ func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile st defer cancel() if _, err := PodWait(ctx, t, profile, 
"kube-system", "integration-test=storage-provisioner", Minutes(4)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for storage-provisioner: %v", err) } checkStorageClass := func() error { @@ -58,13 +58,13 @@ func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile st // Ensure the addon-manager has created the StorageClass before creating a claim, otherwise it won't be bound if err := retry.Expo(checkStorageClass, time.Millisecond*500, Seconds(100)); err != nil { - t.Errorf("no default storage class after retry: %v", err) + t.Errorf("failed to check for storage class: %v", err) } // Now create a testpvc rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "apply", "-f", filepath.Join(*testdataDir, "pvc.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("kubectl apply pvc.yaml failed: args %q: %v", rr.Args, err) } checkStoragePhase := func() error { @@ -84,6 +84,6 @@ func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile st } if err := retry.Expo(checkStoragePhase, 2*time.Second, Minutes(4)); err != nil { - t.Fatalf("PV Creation failed with error: %v", err) + t.Fatalf("failed to check storage phase: %v", err) } } From b63f0488546478dc07543d92065b3074996b925f Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 20:26:10 -0700 Subject: [PATCH 263/668] improve logs for fn_tunnel_cmd --- test/integration/fn_tunnel_cmd.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/integration/fn_tunnel_cmd.go b/test/integration/fn_tunnel_cmd.go index e4598b3da7..dffb247fb4 100644 --- a/test/integration/fn_tunnel_cmd.go +++ b/test/integration/fn_tunnel_cmd.go @@ -50,7 +50,7 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { client, err := kapi.Client(profile) if err != nil { - t.Fatalf("client: %v", err) + t.Fatalf("failed to get kubernetes client for %q: %v", profile, err) } // Pre-Cleanup @@ -62,7 
+62,7 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { args := []string{"-p", profile, "tunnel", "--alsologtostderr", "-v=1"} ss, err := Start(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%s failed: %v", args, err) + t.Errorf("failed to start a tunnel: args %q: %v", args, err) } defer ss.Stop(t) @@ -99,7 +99,7 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { if err != nil { t.Errorf("%s failed: %v", rr.Args, err) } - t.Logf("kubectl get svc nginx-svc:\n%s", rr.Stdout) + t.Logf("failed to kubectl get svc nginx-svc:\n%s", rr.Stdout) } got := []byte{} @@ -120,11 +120,11 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { return nil } if err = retry.Expo(fetch, time.Millisecond*500, Minutes(2), 13); err != nil { - t.Errorf("failed to contact nginx at %s: %v", nginxIP, err) + t.Errorf("failed to hit nginx at %q: %v", nginxIP, err) } want := "Welcome to nginx!" if !strings.Contains(string(got), want) { - t.Errorf("body = %q, want *%s*", got, want) + t.Errorf("expected body to contain %q, but got *%q*", want, got) } } From 1f900e2bb583cf73f30ffc7df926bb5686f1df3e Mon Sep 17 00:00:00 2001 From: Prasad Katti Date: Wed, 25 Mar 2020 20:45:04 -0700 Subject: [PATCH 264/668] Add a tutorial for using AWS ECR with registry-creds addon --- .../configuring_creds_for_aws_ecr.md | 137 ++++++++++++++++++ 1 file changed, 137 insertions(+) create mode 100644 site/content/en/docs/Tutorials/configuring_creds_for_aws_ecr.md diff --git a/site/content/en/docs/Tutorials/configuring_creds_for_aws_ecr.md b/site/content/en/docs/Tutorials/configuring_creds_for_aws_ecr.md new file mode 100644 index 0000000000..2476394e0f --- /dev/null +++ b/site/content/en/docs/Tutorials/configuring_creds_for_aws_ecr.md @@ -0,0 +1,137 @@ +--- +title: "Configure credentials for AWS Elastic Container Registry using registry-creds addon" +linkTitle: "Configure creds for AWS ECR using registry-creds" 
+weight: 1 +date: 2020-03-25 +description: > + How to configure credentials for AWS ECR using the registry-creds addon for a minikube cluster +--- + +## Overview + +The minikube [registry-creds addon](https://github.com/kubernetes/minikube/tree/master/deploy/addons/registry-creds) enables developers to setup credentials for pulling images from AWS ECR from inside their minikube cluster. + +The addon automagically refreshes the service account token for the `default` service account in the `default` namespace. + + +## Prerequisites + +- a working minikube cluster +- a container image in AWS ECR that you would like to use +- AWS access keys that can be used to pull the above image +- AWS account number of the account hosting the registry + + +## Configuring and enabling the registry-creds addon + + +### Configure the registry-creds addon + +Configure the minikube registry-creds addon with the following command: + +Note: In this tutorial, we will focus only on the AWS ECR. + +```shell +minikube addons configure registry-creds +``` + +Follow the prompt and enter `y` for AWS ECR. Provide the requested information. It should look like this - +```shell +$ minikube addons configure registry-creds + +Do you want to enable AWS Elastic Container Registry? [y/n]: y +-- Enter AWS Access Key ID: +-- Enter AWS Secret Access Key: +-- (Optional) Enter AWS Session Token: +-- Enter AWS Region: us-west-2 +-- Enter 12 digit AWS Account ID (Comma separated list): +-- (Optional) Enter ARN of AWS role to assume: + +Do you want to enable Google Container Registry? [y/n]: n + +Do you want to enable Docker Registry? [y/n]: n + +Do you want to enable Azure Container Registry? 
[y/n]: n +✅ registry-creds was successfully configured + +``` + +### Enable the registry-creds addon + +Enable the minikube registry-creds addon with the following command: + +```shell +minikube addons enable registry-creds +``` + +### Create a deployment that uses an image in AWS ECR + +This tutorial will use a vanilla alpine image that has been already uploaded into a repository in AWS ECR. + +Let's use this alpine deployment that is setup to use the alpine image from ECR. Make sure you update the `image` field with a valid URI. + +`alpine-deployment.yaml` +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: alpine-deployment + labels: + app: alpine +spec: + replicas: 1 + selector: + matchLabels: + app: alpine + template: + metadata: + labels: + app: alpine + spec: + containers: + - name: alpine + image: <>.dkr.ecr.<>.amazonaws.com/alpine:latest + command: ['sh', '-c', 'echo Container is Running ; sleep 3600'] +``` + +Create a file called `alpine-deployment.yaml` and paste the contents above. Be sure to replace <> and <> with your actual account number and aws region. Then create the alpine deployment with the following command: + +```shell +kubectl apply -f alpine-deployment.yaml +``` + +### Test your deployment + +Describe the pod and verify the image pull was successful: + +```shell +kubectl describe pods << alpine-deployment-pod-name >> +``` + +You should see an event like this: + +```text +Successfully pulled image "<>.dkr.ecr.<>.amazonaws.com/alpine:latest" +``` + +If you do not see that event, look at the troubleshooting section. + + +## Review + +In the above tutorial, we configured the `registry-creds` addon to refresh the credentials for AWS ECR so that we could pull private container images onto our minikube cluster. We ultimately created a deployment that used an image in a private AWS ECR repository. + + +## Troubleshooting + +- Check if you have a secret called `awsecr-cred` in the `default` namespace by running `kubectl get secrets`. 
+- Check if the image path is valid. +- Check if the registry-creds addon is enabled by using `minikube addons list`. + +## Caveats + +The service account token for the `default` service account in the `default` namespace is kept updated by the addon. If you create your deployment in a different namespace, the image pull will not work. + +## Related articles + +- [registry-creds addon](https://github.com/kubernetes/minikube/tree/master/deploy/addons/registry-creds) From 362254cfc541383d709985eaf5b6ff0ff94e4431 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 21:43:32 -0700 Subject: [PATCH 265/668] improve test log formatting for functional_test --- test/integration/functional_test.go | 148 ++++++++++++++-------------- 1 file changed, 74 insertions(+), 74 deletions(-) diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index 127ec9dd99..f9c4b3b440 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -63,11 +63,11 @@ func TestFunctional(t *testing.T) { } p := localSyncTestPath() if err := os.Remove(p); err != nil { - t.Logf("unable to remove %s: %v", p, err) + t.Logf("unable to remove %q: %v", p, err) } p = localTestCertPath() if err := os.Remove(p); err != nil { - t.Logf("unable to remove %s: %v", p, err) + t.Logf("unable to remove %q: %v", p, err) } CleanupWithLogs(t, profile, cancel) }() @@ -137,7 +137,7 @@ func TestFunctional(t *testing.T) { func validateNodeLabels(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "nodes", "--output=go-template", "--template='{{range $k, $v := (index .items 0).metadata.labels}}{{$k}} {{end}}'")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to 'kubectl get nodes' with args %q: %v", rr.Args, err) } expectedLabels := []string{"minikube.k8s.io/commit", "minikube.k8s.io/version", "minikube.k8s.io/updated_at", 
"minikube.k8s.io/name"} for _, el := range expectedLabels { @@ -155,10 +155,10 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) { c := exec.CommandContext(mctx, "/bin/bash", "-c", "eval $("+Target()+" -p "+profile+" docker-env) && "+Target()+" status -p "+profile) rr, err := Run(t, c) if err != nil { - t.Fatalf("Failed to do minikube status after eval-ing docker-env %s", err) + t.Fatalf("failed to do minikube status after eval-ing docker-env %s", err) } if !strings.Contains(rr.Output(), "Running") { - t.Fatalf("Expected status output to include 'Running' after eval docker-env but got \n%s", rr.Output()) + t.Fatalf("expected status output to include 'Running' after eval docker-env but got: *%q*", rr.Output()) } mctx, cancel = context.WithTimeout(ctx, Seconds(13)) @@ -167,12 +167,12 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) { c = exec.CommandContext(mctx, "/bin/bash", "-c", "eval $("+Target()+" -p "+profile+" docker-env) && docker images") rr, err = Run(t, c) if err != nil { - t.Fatalf("Failed to test eval docker-evn %s", err) + t.Fatalf("failed to run minikube docker-env. args %q : %v ", rr.Args, err) } expectedImgInside := "gcr.io/k8s-minikube/storage-provisioner" if !strings.Contains(rr.Output(), expectedImgInside) { - t.Fatalf("Expected 'docker ps' to have %q from docker-daemon inside minikube. the docker ps output is:\n%q\n", expectedImgInside, rr.Output()) + t.Fatalf("expected 'docker images' to have %q inside minikube. 
but the output is: *%q*", expectedImgInside, rr.Output()) } } @@ -180,11 +180,11 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) { func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) { srv, err := startHTTPProxy(t) if err != nil { - t.Fatalf("Failed to set up the test proxy: %s", err) + t.Fatalf("failed to set up the test proxy: %s", err) } // Use more memory so that we may reliably fit MySQL and nginx - startArgs := append([]string{"start", "-p", profile, "--wait=true", "--memory", "2500MB"}, StartArgs()...) + startArgs := append([]string{"start", "-p", profile, "--wait=true"}, StartArgs()...) c := exec.CommandContext(ctx, Target(), startArgs...) env := os.Environ() env = append(env, fmt.Sprintf("HTTP_PROXY=%s", srv.Addr)) @@ -192,7 +192,7 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) { c.Env = env rr, err := Run(t, c) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed minikube start. args %q: %v", rr.Args, err) } want := "Found network options:" @@ -210,10 +210,10 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) { func validateKubeContext(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "config", "current-context")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to get current-context. 
args %q : %v", rr.Args, err) } if !strings.Contains(rr.Stdout.String(), profile) { - t.Errorf("current-context = %q, want %q", rr.Stdout.String(), profile) + t.Errorf("expected current-context = %q, but got *%q*", profile, rr.Stdout.String()) } } @@ -221,13 +221,13 @@ func validateKubeContext(ctx context.Context, t *testing.T, profile string) { func validateKubectlGetPods(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "po", "-A")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to get kubectl pods: args %q : %v", rr.Args, err) } if rr.Stderr.String() != "" { - t.Errorf("%s: got unexpected stderr: %s", rr.Command(), rr.Stderr) + t.Errorf("expected stderr to be empty but got *%q*: args %q", rr.Stderr, rr.Command()) } if !strings.Contains(rr.Stdout.String(), "kube-system") { - t.Errorf("%s = %q, want *kube-system*", rr.Command(), rr.Stdout) + t.Errorf("expected stdout to include *kube-system* but got *%q*. args: %q", rr.Stdout, rr.Command()) } } @@ -237,7 +237,7 @@ func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string) kubectlArgs := []string{"-p", profile, "kubectl", "--", "--context", profile, "get", "pods"} rr, err := Run(t, exec.CommandContext(ctx, Target(), kubectlArgs...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to get pods. args %q: %v", rr.Args, err) } } @@ -245,12 +245,12 @@ func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string) func validateComponentHealth(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "cs", "-o=json")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to get components. 
args %q: %v", rr.Args, err) } cs := api.ComponentStatusList{} d := json.NewDecoder(bytes.NewReader(rr.Stdout.Bytes())) if err := d.Decode(&cs); err != nil { - t.Fatalf("decode: %v", err) + t.Fatalf("failed to decode kubectl json output: args %q : %v", rr.Args, err) } for _, i := range cs.Items { @@ -270,28 +270,29 @@ func validateComponentHealth(ctx context.Context, t *testing.T, profile string) func validateStatusCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) if err != nil { - t.Errorf("%q failed: %v", rr.Args, err) + t.Errorf("failed to run minikube status. args %q : %v", rr.Args, err) } // Custom format rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-f", "host:{{.Host}},kublet:{{.Kubelet}},apiserver:{{.APIServer}},kubeconfig:{{.Kubeconfig}}")) if err != nil { - t.Errorf("%q failed: %v", rr.Args, err) + t.Errorf("failed to run minikube status with custom format: args %q: %v", rr.Args, err) } - match, _ := regexp.MatchString(`host:([A-z]+),kublet:([A-z]+),apiserver:([A-z]+),kubeconfig:([A-z]+)`, rr.Stdout.String()) + re := `host:([A-z]+),kublet:([A-z]+),apiserver:([A-z]+),kubeconfig:([A-z]+)` + match, _ := regexp.MatchString(re, rr.Stdout.String()) if !match { - t.Errorf("%q failed: %v. Output for custom format did not match", rr.Args, err) + t.Errorf("failed to match regex %q for minikube status with custom format. args %q. output %q", re, rr.Args, rr.Output()) } // Json output rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-o", "json")) if err != nil { - t.Errorf("%q failed: %v", rr.Args, err) + t.Errorf("failed to run minikube status with json output. args %q : %v", rr.Args, err) } var jsonObject map[string]interface{} err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject) if err != nil { - t.Errorf("%q failed: %v", rr.Args, err) + t.Errorf("failed to decode json from minikube status. args %q. 
%v", rr.Args, err) } if _, ok := jsonObject["Host"]; !ok { t.Errorf("%q failed: %v. Missing key %s in json object", rr.Args, err, "Host") @@ -312,7 +313,7 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) { args := []string{"dashboard", "--url", "-p", profile, "--alsologtostderr", "-v=1"} ss, err := Start(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%q failed: %v", args, err) + t.Errorf("failed to run minikube dashboard. args %q : %v", args, err) } defer func() { ss.Stop(t) @@ -339,7 +340,7 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) { if resp.StatusCode != http.StatusOK { body, err := ioutil.ReadAll(resp.Body) if err != nil { - t.Errorf("Unable to read http response body: %v", err) + t.Errorf("failed to read http response body from dashboard %q: %v", u.String(), err) } t.Errorf("%s returned status code %d, expected %d.\nbody:\n%s", u, resp.StatusCode, http.StatusOK, body) } @@ -349,7 +350,7 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) { func validateDNS(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "busybox.yaml"))) if err != nil { - t.Fatalf("%q failed: %v", rr.Args, err) + t.Fatalf("failed to kubectl replace busybox : args %q: %v", rr.Args, err) } names, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", Minutes(4)) @@ -407,29 +408,29 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { t.Run("cache", func(t *testing.T) { t.Run("add", func(t *testing.T) { for _, img := range []string{"busybox:latest", "busybox:1.28.4-glibc", "k8s.gcr.io/pause:latest"} { - _, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img)) + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img)) if err != nil { - 
t.Errorf("Failed to cache image %q", img) + t.Errorf("failed to cache add image %q. args %q err %v", img, rr.Args, err) } } }) t.Run("delete_busybox:1.28.4-glibc", func(t *testing.T) { - _, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", "busybox:1.28.4-glibc")) + rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", "busybox:1.28.4-glibc")) if err != nil { - t.Errorf("failed to delete image busybox:1.28.4-glibc from cache: %v", err) + t.Errorf("failed to delete image busybox:1.28.4-glibc from cache. args %q: %v", rr.Args, err) } }) t.Run("list", func(t *testing.T) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "list")) if err != nil { - t.Errorf("cache list failed: %v", err) + t.Errorf("failed to do cache list. args %q: %v", rr.Args, err) } if !strings.Contains(rr.Output(), "k8s.gcr.io/pause") { - t.Errorf("cache list did not include k8s.gcr.io/pause") + t.Errorf("expected 'cache list' output to include 'k8s.gcr.io/pause' but got:\n ***%q***", rr.Output()) } if strings.Contains(rr.Output(), "busybox:1.28.4-glibc") { - t.Errorf("cache list should not include busybox:1.28.4-glibc") + t.Errorf("expected 'cache list' output not to include busybox:1.28.4-glibc but got:\n ***%q***", rr.Output()) } }) @@ -439,7 +440,7 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { t.Errorf("failed to get images by %q ssh %v", rr.Command(), err) } if !strings.Contains(rr.Output(), "1.28.4-glibc") { - t.Errorf("expected '1.28.4-glibc' to be in the output: %s", rr.Output()) + t.Errorf("expected '1.28.4-glibc' to be in the output but got %q", rr.Output()) } }) @@ -490,16 +491,16 @@ func validateConfigCmd(ctx context.Context, t *testing.T, profile string) { args := append([]string{"-p", profile, "config"}, tc.args...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil && tc.wantErr == "" { - t.Errorf("unexpected failure: %s failed: %v", rr.Args, err) + t.Errorf("failed to config minikube. args %q : %v", rr.Args, err) } got := strings.TrimSpace(rr.Stdout.String()) if got != tc.wantOut { - t.Errorf("%s stdout got: %q, want: %q", rr.Command(), got, tc.wantOut) + t.Errorf("expected config output for %q to be -%q- but got *%q*", rr.Command(), tc.wantOut, got) } got = strings.TrimSpace(rr.Stderr.String()) if got != tc.wantErr { - t.Errorf("%s stderr got: %q, want: %q", rr.Command(), got, tc.wantErr) + t.Errorf("expected config error for %q to be -%q- but got *%q*", rr.Command(), tc.wantErr, got) } } } @@ -512,7 +513,7 @@ func validateLogsCmd(ctx context.Context, t *testing.T, profile string) { } for _, word := range []string{"Docker", "apiserver", "Linux", "kubelet"} { if !strings.Contains(rr.Stdout.String(), word) { - t.Errorf("minikube logs missing expected word: %q", word) + t.Errorf("excpeted minikube logs to include word: -%q- but got \n***%q***\n", word, rr.Output()) } } } @@ -549,7 +550,7 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { // List profiles rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to list profiles: args %q : %v", rr.Args, err) } // Table output @@ -563,21 +564,20 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { } } if !profileExists { - t.Errorf("%s failed: Missing profile '%s'. Got '\n%s\n'", rr.Args, profile, rr.Stdout.String()) + t.Errorf("expected 'profile list' output to include %q but got *%q*. 
args: %q", profile, rr.Stdout.String(), rr.Args) } - }) t.Run("profile_json_output", func(t *testing.T) { // Json output rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to list profiles with json format. args %q: %v", rr.Args, err) } var jsonObject map[string][]map[string]interface{} err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to decode json from profile list: args %q: %v", rr.Args, err) } validProfiles := jsonObject["valid"] profileExists := false @@ -588,7 +588,7 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { } } if !profileExists { - t.Errorf("%s failed: Missing profile '%s'. Got '\n%s\n'", rr.Args, profile, rr.Stdout.String()) + t.Errorf("expected the json of 'profile list' to include %q but got *%q*. args: %q", profile, rr.Stdout.String(), rr.Args) } }) @@ -598,7 +598,7 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node", "--image=gcr.io/hello-minikube-zero-install/hello-node")) if err != nil { - t.Logf("%s failed: %v (may not be an error)", rr.Args, err) + t.Logf("% failed: %v (may not be an error)", rr.Args, err) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "expose", "deployment", "hello-node", "--type=NodePort", "--port=8080")) if err != nil { @@ -606,48 +606,48 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { } if _, err := PodWait(ctx, t, profile, "default", "app=hello-node", Minutes(10)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for hello-node pod: %v", err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), 
"-p", profile, "service", "list")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to do service list. args %q : %v", rr.Args, err) } if !strings.Contains(rr.Stdout.String(), "hello-node") { - t.Errorf("service list got %q, wanted *hello-node*", rr.Stdout.String()) + t.Errorf("expected 'service list' to contain *hello-node* but got -%q-", rr.Stdout.String()) } // Test --https --url mode rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "--namespace=default", "--https", "--url", "hello-node")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to get service url. args %q : %v", rr.Args, err) } if rr.Stderr.String() != "" { - t.Errorf("unexpected stderr output: %s", rr.Stderr) + t.Errorf("expected stderr to be empty but got *%q*", rr.Stderr) } endpoint := strings.TrimSpace(rr.Stdout.String()) u, err := url.Parse(endpoint) if err != nil { - t.Fatalf("failed to parse %q: %v", endpoint, err) + t.Fatalf("failed to parse service url endpoint %q: %v", endpoint, err) } if u.Scheme != "https" { - t.Errorf("got scheme: %q, expected: %q", u.Scheme, "https") + t.Errorf("expected scheme to be 'https' but got %q", u.Scheme) } // Test --format=IP rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "hello-node", "--url", "--format={{.IP}}")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to get service url with custom format. args %q: %v", rr.Args, err) } if strings.TrimSpace(rr.Stdout.String()) != u.Hostname() { - t.Errorf("%s = %q, wanted %q", rr.Args, rr.Stdout.String(), u.Hostname()) + t.Errorf("expected 'service --format={{.IP}}' output to be -%q- but got *%q* . 
args %q.", u.Hostname(), rr.Stdout.String(), rr.Args) } // Test a regular URLminikube rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "hello-node", "--url")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to get service url. args: %q: %v", rr.Args, err) } endpoint = strings.TrimSpace(rr.Stdout.String()) @@ -656,7 +656,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { t.Fatalf("failed to parse %q: %v", endpoint, err) } if u.Scheme != "http" { - t.Fatalf("got scheme: %q, expected: %q", u.Scheme, "http") + t.Fatalf("expected scheme to be -'http'- got scheme: *%q*", "http", u.Scheme) } t.Logf("url: %s", endpoint) @@ -665,7 +665,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { t.Fatalf("get failed: %v\nresp: %v", err, resp) } if resp.StatusCode != http.StatusOK { - t.Fatalf("%s = status code %d, want %d", u, resp.StatusCode, http.StatusOK) + t.Fatalf("expeced status code for %q to be -%q- but got *%q*", endpoint, http.StatusOK, resp.StatusCode) } } @@ -674,23 +674,23 @@ func validateAddonsCmd(ctx context.Context, t *testing.T, profile string) { // Table output rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "list")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to do addon list: args %q : %v", rr.Args, err) } for _, a := range []string{"dashboard", "ingress", "ingress-dns"} { if !strings.Contains(rr.Output(), a) { - t.Errorf("addon list expected to include %q but didn't output: %q", a, rr.Output()) + t.Errorf("expected 'addon list' output to include -%q- but got *%q*", a, rr.Output()) } } // Json output rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "list", "-o", "json")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to do addon list with json output. 
args %q: %v", rr.Args, err) } var jsonObject map[string]interface{} err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to decode addon list json output : %v", err) } } @@ -702,10 +702,10 @@ func validateSSHCmd(ctx context.Context, t *testing.T, profile string) { want := "hello\n" rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("echo hello"))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to run an ssh command. args %q : %v", rr.Args, err) } if rr.Stdout.String() != want { - t.Errorf("%v = %q, want = %q", rr.Args, rr.Stdout.String(), want) + t.Errorf("expected minikube ssh command output to be -%q- but got *%q*. args %q", want, rr.Stdout.String(), rr.Args) } } @@ -713,12 +713,12 @@ func validateSSHCmd(ctx context.Context, t *testing.T, profile string) { func validateMySQL(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "mysql.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to kubectl replace mysql: args %q failed: %v", rr.Args, err) } names, err := PodWait(ctx, t, profile, "default", "app=mysql", Minutes(10)) if err != nil { - t.Fatalf("podwait: %v", err) + t.Fatalf("failed waiting for mysql pod: %v", err) } // Retry, as mysqld first comes up without users configured. Scan for names in case of a reschedule. 
@@ -726,8 +726,8 @@ func validateMySQL(ctx context.Context, t *testing.T, profile string) { rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", names[0], "--", "mysql", "-ppassword", "-e", "show databases;")) return err } - if err = retry.Expo(mysql, 2*time.Second, Seconds(180)); err != nil { - t.Errorf("mysql failing: %v", err) + if err = retry.Expo(mysql, 1*time.Second, Seconds(200)); err != nil { + t.Errorf("failed to exec 'mysql -ppassword -e show databases;': %v", err) } } @@ -757,12 +757,12 @@ func setupFileSync(ctx context.Context, t *testing.T, profile string) { t.Logf("local sync path: %s", p) err := copy.Copy("./testdata/sync.test", p) if err != nil { - t.Fatalf("copy: %v", err) + t.Fatalf("failed to copy ./testdata/sync.test : %v", err) } err = copy.Copy("./testdata/minikube_test.pem", localTestCertPath()) if err != nil { - t.Fatalf("copy: %v", err) + t.Fatalf("failed to copy ./testdata/minikube_test.pem : %v", err) } } @@ -783,7 +783,7 @@ func validateFileSync(ctx context.Context, t *testing.T, profile string) { expected, err := ioutil.ReadFile("./testdata/sync.test") if err != nil { - t.Errorf("test file not found: %v", err) + t.Errorf("failed to read test file '/testdata/sync.test' : %v", err) } if diff := cmp.Diff(string(expected), got); diff != "" { @@ -813,13 +813,13 @@ func validateCertSync(ctx context.Context, t *testing.T, profile string) { t.Logf("Checking for existence of %s within VM", vp) rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("cat %s", vp))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to check existance of %q inside minikube. args %q: %v", vp, rr.Args, err) } // Strip carriage returned by ssh got := strings.Replace(rr.Stdout.String(), "\r", "", -1) if diff := cmp.Diff(string(want), got); diff != "" { - t.Errorf("minikube_test.pem -> %s mismatch (-want +got):\n%s", vp, diff) + t.Errorf("failed verify pem file. 
minikube_test.pem -> %s mismatch (-want +got):\n%s", vp, diff) } } } @@ -828,7 +828,7 @@ func validateCertSync(ctx context.Context, t *testing.T, profile string) { func validateUpdateContextCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "update-context", "--alsologtostderr", "-v=2")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to run minikube update-context: args %q: %v", rr.Args, err) } want := []byte("IP was already correctly configured") From 90cac63b8517b9eca2470d995486236374a37d98 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 21:48:36 -0700 Subject: [PATCH 266/668] improve formatting for TestGuestEnvironment --- test/integration/guest_env_test.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/test/integration/guest_env_test.go b/test/integration/guest_env_test.go index 41344ccbc8..e59284df60 100644 --- a/test/integration/guest_env_test.go +++ b/test/integration/guest_env_test.go @@ -27,6 +27,7 @@ import ( "k8s.io/minikube/pkg/minikube/vmpath" ) +// TestGuestEnvironment verifies files and packges installed inside minikube ISO/Base image func TestGuestEnvironment(t *testing.T) { MaybeParallel(t) @@ -37,18 +38,18 @@ func TestGuestEnvironment(t *testing.T) { args := append([]string{"start", "-p", profile, "--install-addons=false", "--memory=1800", "--wait=false"}, StartArgs()...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to start minikube: args %q: %v", rr.Args, err) } // Run as a group so that our defer doesn't happen as tests are runnings t.Run("Binaries", func(t *testing.T) { - for _, pkg := range []string{"git", "rsync", "curl", "wget", "socat", "iptables", "VBoxControl", "VBoxService"} { + for _, pkg := range []string{"git", "rsync", "curl", "wget", "socat", "iptables", "VBoxControl", "VBoxService", "crictl", "podman", "docker"} { pkg := pkg t.Run(pkg, func(t *testing.T) { t.Parallel() rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("which %s", pkg))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to verify existance of %q binary : args %q: %v", pkg, rr.Args, err) } }) } @@ -67,9 +68,9 @@ func TestGuestEnvironment(t *testing.T) { mount := mount t.Run(mount, func(t *testing.T) { t.Parallel() - rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("df -t ext4 %s | grep %s", mount, mount))) + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("df -t ext4 %s | grep %s", mount, mount))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to verify existance of %q mount. 
args %q: %v", mount, rr.Args, err) } }) } From 73a9653c80a5ec2b5efbcf9c9b72245c3dcc1c91 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 21:52:00 -0700 Subject: [PATCH 267/668] improve logging for gvisor test --- test/integration/gvisor_addon_test.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/test/integration/gvisor_addon_test.go b/test/integration/gvisor_addon_test.go index d69f1d205b..6e20c249c6 100644 --- a/test/integration/gvisor_addon_test.go +++ b/test/integration/gvisor_addon_test.go @@ -50,7 +50,7 @@ func TestGvisorAddon(t *testing.T) { startArgs := append([]string{"start", "-p", profile, "--memory=2200", "--container-runtime=containerd", "--docker-opt", "containerd=/var/run/containerd/containerd.sock"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to start minikube: args %q: %v", rr.Args, err) } // If it exists, include a locally built gvisor image @@ -65,7 +65,7 @@ func TestGvisorAddon(t *testing.T) { } if _, err := PodWait(ctx, t, profile, "kube-system", "kubernetes.io/minikube-addons=gvisor", Minutes(4)); err != nil { - t.Fatalf("waiting for gvisor controller to be up: %v", err) + t.Fatalf("failed waiting for 'gvisor controller' pod: %v", err) } // Create an untrusted workload @@ -80,29 +80,29 @@ func TestGvisorAddon(t *testing.T) { } if _, err := PodWait(ctx, t, profile, "default", "run=nginx,untrusted=true", Minutes(4)); err != nil { - t.Errorf("nginx: %v", err) + t.Errorf("failed waiting for nginx pod: %v", err) } if _, err := PodWait(ctx, t, profile, "default", "run=nginx,runtime=gvisor", Minutes(4)); err != nil { - t.Errorf("nginx: %v", err) + t.Errorf("failed waitinf for gvisor pod: %v", err) } // Ensure that workloads survive a restart rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + 
t.Fatalf("faild stopping minikube. args %q : %v", rr.Args, err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed starting minikube after a stop. args %q, %v", rr.Args, err) } if _, err := PodWait(ctx, t, profile, "kube-system", "kubernetes.io/minikube-addons=gvisor", Minutes(4)); err != nil { - t.Errorf("waiting for gvisor controller to be up: %v", err) + t.Errorf("failed waiting for 'gvisor controller' pod : %v", err) } if _, err := PodWait(ctx, t, profile, "default", "run=nginx,untrusted=true", Minutes(4)); err != nil { - t.Errorf("nginx: %v", err) + t.Errorf("failed waiting for 'nginx' pod : %v", err) } if _, err := PodWait(ctx, t, profile, "default", "run=nginx,runtime=gvisor", Minutes(4)); err != nil { - t.Errorf("nginx: %v", err) + t.Errorf("failed waiting for 'gvisor' pod : %v", err) } } From acc951033b9a3a8a86b65659ee6c54ec578fe6d0 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 22:10:32 -0700 Subject: [PATCH 268/668] improve test logs for start_stop_delete --- test/integration/start_stop_delete_test.go | 26 +++++++++++----------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/test/integration/start_stop_delete_test.go b/test/integration/start_stop_delete_test.go index e948f4b6ef..8c6ab2232f 100644 --- a/test/integration/start_stop_delete_test.go +++ b/test/integration/start_stop_delete_test.go @@ -92,7 +92,7 @@ func TestStartStop(t *testing.T) { rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed starting minikube -first start-. 
args %q: %v", rr.Args, err) } if !strings.Contains(tc.name, "cni") { @@ -101,43 +101,43 @@ func TestStartStop(t *testing.T) { rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile, "--alsologtostderr", "-v=3")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed stopping minikube - first stop-. args %q : %v", rr.Args, err) } // The none driver never really stops if !NoneDriver() { got := Status(ctx, t, Target(), profile, "Host") if got != state.Stopped.String() { - t.Errorf("post-stop host status = %q; want = %q", got, state.Stopped) + t.Errorf("expected post-stop host status to be -%q- but got *%q*", state.Stopped, got) } } // Enable an addon to assert it comes up afterwards rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to enable an addon while minikube is stopped. args %q: ", rr.Args, err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { // Explicit fatal so that failures don't move directly to deletion - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("Failed to start minikube after stop -Second Start-. 
args %q: %v", rr.Args, err) } if strings.Contains(tc.name, "cni") { t.Logf("WARNING: cni mode requires additional setup before pods can schedule :(") } else { if _, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", Minutes(4)); err != nil { - t.Fatalf("post-stop-start pod wait: %v", err) + t.Fatalf("failed waiting for pod 'busybox' post-stop-start: %v", err) } if _, err := PodWait(ctx, t, profile, "kubernetes-dashboard", "k8s-app=kubernetes-dashboard", Minutes(4)); err != nil { - t.Fatalf("post-stop-start addon wait: %v", err) + t.Fatalf("failed waiting for 'addon dashboard' pod post-stop-start: %v", err) } } got := Status(ctx, t, Target(), profile, "Host") if got != state.Running.String() { - t.Errorf("post-start host status = %q; want = %q", got, state.Running) + t.Errorf("expected host status after start-stop-start to be -%q- but got *%q*", state.Running, got) } if !NoneDriver() { @@ -150,7 +150,7 @@ func TestStartStop(t *testing.T) { // Normally handled by cleanuprofile, but not fatal there rr, err = Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to clean up: args %q: %v", rr.Args, err) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "config", "get-contexts", profile)) @@ -158,7 +158,7 @@ func TestStartStop(t *testing.T) { t.Logf("config context error: %v (may be ok)", err) } if rr.ExitCode != 1 { - t.Errorf("wanted exit code 1, got %d. output: %s", rr.ExitCode, rr.Output()) + t.Errorf("expected exit code 1, got %d. output: %s", rr.ExitCode, rr.Output()) } } }) @@ -250,14 +250,14 @@ func testPulledImages(ctx context.Context, t *testing.T, profile string, version rr, err := Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "sudo crictl images -o json")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed tp get images inside minikube. 
args %q: %v", rr.Args, err) } jv := map[string][]struct { Tags []string `json:"repoTags"` }{} err = json.Unmarshal(rr.Stdout.Bytes(), &jv) if err != nil { - t.Errorf("images unmarshal: %v", err) + t.Errorf("failed to decode images json %v. output: %q", err, rr.Output()) } found := map[string]bool{} for _, img := range jv["images"] { @@ -274,7 +274,7 @@ func testPulledImages(ctx context.Context, t *testing.T, profile string, version } want, err := images.Kubeadm("", version) if err != nil { - t.Errorf("kubeadm images: %v", version) + t.Errorf("failed to get kubeadm images for %s : %v", version, err) } gotImages := []string{} for k := range found { From dc5dd62b58f029609e6cfa38c503d21f379bf5b0 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 22:15:24 -0700 Subject: [PATCH 269/668] lint --- test/integration/aaa_download_only_test.go | 6 +++--- test/integration/addons_test.go | 2 +- test/integration/functional_test.go | 10 +++++----- test/integration/start_stop_delete_test.go | 4 ++-- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/test/integration/aaa_download_only_test.go b/test/integration/aaa_download_only_test.go index cf6239cd9c..2c75e0d47e 100644 --- a/test/integration/aaa_download_only_test.go +++ b/test/integration/aaa_download_only_test.go @@ -156,9 +156,9 @@ func TestDownloadOnlyKic(t *testing.T) { args := []string{"start", "--download-only", "-p", profile, "--force", "--alsologtostderr"} args = append(args, StartArgs()...) 
- rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) - if err != nil { - t.Errorf("start with download only failed %q : %v:\n%s", args, err) + + if _, err := Run(t, exec.CommandContext(ctx, Target(), args...)); err != nil { + t.Errorf("start with download only failed %q : %v", args, err) } // Make sure the downloaded image tarball exists diff --git a/test/integration/addons_test.go b/test/integration/addons_test.go index 43ad112651..3fa3a63d4c 100644 --- a/test/integration/addons_test.go +++ b/test/integration/addons_test.go @@ -179,7 +179,7 @@ func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) { t.Fatalf("failed run minikube ip. args %q : %v", rr.Args, err) } if rr.Stderr.String() != "" { - t.Errorf("expected stderr to be -empty- but got: *%q*", rr.Args, rr.Stderr) + t.Errorf("expected stderr to be -empty- but got: *%q* . args %q", rr.Stderr, rr.Args) } endpoint := fmt.Sprintf("http://%s:%d", strings.TrimSpace(rr.Stdout.String()), 5000) diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index f9c4b3b440..aa9ac58eb0 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -598,11 +598,11 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node", "--image=gcr.io/hello-minikube-zero-install/hello-node")) if err != nil { - t.Logf("% failed: %v (may not be an error)", rr.Args, err) + t.Logf("%q failed: %v (may not be an error).", rr.Args, err) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "expose", "deployment", "hello-node", "--type=NodePort", "--port=8080")) if err != nil { - t.Logf("%s failed: %v (may not be an error)", rr.Args, err) + t.Logf("%q failed: %v (may not be an error)", rr.Args, err) } if _, err := 
PodWait(ctx, t, profile, "default", "app=hello-node", Minutes(10)); err != nil { @@ -656,7 +656,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { t.Fatalf("failed to parse %q: %v", endpoint, err) } if u.Scheme != "http" { - t.Fatalf("expected scheme to be -'http'- got scheme: *%q*", "http", u.Scheme) + t.Fatalf("expected scheme to be -%q- got scheme: *%q*", "http", u.Scheme) } t.Logf("url: %s", endpoint) @@ -665,7 +665,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { t.Fatalf("get failed: %v\nresp: %v", err, resp) } if resp.StatusCode != http.StatusOK { - t.Fatalf("expeced status code for %q to be -%q- but got *%q*", endpoint, http.StatusOK, resp.StatusCode) + t.Fatalf("expected status code for %q to be -%q- but got *%q*", endpoint, http.StatusOK, resp.StatusCode) } } @@ -813,7 +813,7 @@ func validateCertSync(ctx context.Context, t *testing.T, profile string) { t.Logf("Checking for existence of %s within VM", vp) rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("cat %s", vp))) if err != nil { - t.Errorf("failed to check existance of %q inside minikube. args %q: %v", vp, rr.Args, err) + t.Errorf("failed to check existence of %q inside minikube. args %q: %v", vp, rr.Args, err) } // Strip carriage returned by ssh diff --git a/test/integration/start_stop_delete_test.go b/test/integration/start_stop_delete_test.go index 8c6ab2232f..21ca1d57ed 100644 --- a/test/integration/start_stop_delete_test.go +++ b/test/integration/start_stop_delete_test.go @@ -115,13 +115,13 @@ func TestStartStop(t *testing.T) { // Enable an addon to assert it comes up afterwards rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile)) if err != nil { - t.Errorf("failed to enable an addon while minikube is stopped. args %q: ", rr.Args, err) + t.Errorf("failed to enable an addon post-stop. 
args %q: %v", rr.Args, err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { // Explicit fatal so that failures don't move directly to deletion - t.Fatalf("Failed to start minikube after stop -Second Start-. args %q: %v", rr.Args, err) + t.Fatalf("failed to start minikube post-stop. args %q: %v", rr.Args, err) } if strings.Contains(tc.name, "cni") { From 2e64eb795227911aee5535f81534245be34807ce Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 22:21:19 -0700 Subject: [PATCH 270/668] convert all rr.Args to rr.Command() --- test/integration/aaa_download_only_test.go | 4 +- test/integration/aab_offline_test.go | 2 +- test/integration/addons_test.go | 40 +++++----- test/integration/docker_test.go | 6 +- test/integration/fn_mount_cmd.go | 10 +-- test/integration/fn_pvc.go | 2 +- test/integration/fn_tunnel_cmd.go | 4 +- test/integration/functional_test.go | 92 +++++++++++----------- test/integration/guest_env_test.go | 6 +- test/integration/gvisor_addon_test.go | 14 ++-- test/integration/none_test.go | 8 +- test/integration/start_stop_delete_test.go | 26 +++--- test/integration/version_upgrade_test.go | 8 +- 13 files changed, 111 insertions(+), 111 deletions(-) diff --git a/test/integration/aaa_download_only_test.go b/test/integration/aaa_download_only_test.go index 2c75e0d47e..27442b31cf 100644 --- a/test/integration/aaa_download_only_test.go +++ b/test/integration/aaa_download_only_test.go @@ -129,7 +129,7 @@ func TestDownloadOnly(t *testing.T) { } rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "--all")) if err != nil { - t.Errorf("failed to delete all. args: %q : %v", rr.Args, err) + t.Errorf("failed to delete all. args: %q : %v", rr.Command(), err) } }) // Delete should always succeed, even if previously partially or fully deleted. 
@@ -139,7 +139,7 @@ func TestDownloadOnly(t *testing.T) { } rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile)) if err != nil { - t.Errorf("failed to delete. args: %q: %v", rr.Args, err) + t.Errorf("failed to delete. args: %q: %v", rr.Command(), err) } }) }) diff --git a/test/integration/aab_offline_test.go b/test/integration/aab_offline_test.go index fb1cdb710f..c635ef09ef 100644 --- a/test/integration/aab_offline_test.go +++ b/test/integration/aab_offline_test.go @@ -53,7 +53,7 @@ func TestOffline(t *testing.T) { rr, err := Run(t, c) if err != nil { // Fatal so that we may collect logs before stop/delete steps - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } }) } diff --git a/test/integration/addons_test.go b/test/integration/addons_test.go index 3fa3a63d4c..e90249a49e 100644 --- a/test/integration/addons_test.go +++ b/test/integration/addons_test.go @@ -43,7 +43,7 @@ func TestAddons(t *testing.T) { args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "-v=1", "--addons=ingress", "--addons=registry", "--addons=metrics-server", "--addons=helm-tiller"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } // Parallelized tests @@ -69,15 +69,15 @@ func TestAddons(t *testing.T) { // Assert that disable/enable works offline rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile)) if err != nil { - t.Errorf("failed to stop minikube. args %q : %v", rr.Args, err) + t.Errorf("failed to stop minikube. 
args %q : %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile)) if err != nil { - t.Errorf("failed to enable dashboard addon: args %q : %v", rr.Args, err) + t.Errorf("failed to enable dashboard addon: args %q : %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "disable", "dashboard", "-p", profile)) if err != nil { - t.Errorf("failed to disable dashboard addon: args %q : %v", rr.Args, err) + t.Errorf("failed to disable dashboard addon: args %q : %v", rr.Command(), err) } } @@ -100,11 +100,11 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-ing.yaml"))) if err != nil { - t.Errorf("failed to kubectl replace nginx-ing. args %q. %v", rr.Args, err) + t.Errorf("failed to kubectl replace nginx-ing. args %q. %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-pod-svc.yaml"))) if err != nil { - t.Errorf("failed to kubectl replace nginx-pod-svc. args %q. %v", rr.Args, err) + t.Errorf("failed to kubectl replace nginx-pod-svc. args %q. 
%v", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "default", "run=nginx", Minutes(4)); err != nil { @@ -121,10 +121,10 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) { return err } if rr.Stderr.String() != "" { - t.Logf("%v: unexpected stderr: %s (may be temproary)", rr.Args, rr.Stderr) + t.Logf("%v: unexpected stderr: %s (may be temproary)", rr.Command(), rr.Stderr) } if !strings.Contains(rr.Stdout.String(), want) { - return fmt.Errorf("%v stdout = %q, want %q", rr.Args, rr.Stdout, want) + return fmt.Errorf("%v stdout = %q, want %q", rr.Command(), rr.Stdout, want) } return nil } @@ -135,7 +135,7 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) { rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "ingress", "--alsologtostderr", "-v=1")) if err != nil { - t.Errorf("failed to disable ingress addon. args %q : %v", rr.Args, err) + t.Errorf("failed to disable ingress addon. args %q : %v", rr.Command(), err) } } @@ -161,12 +161,12 @@ func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) { // Test from inside the cluster (no curl available on busybox) rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "po", "-l", "run=registry-test", "--now")) if err != nil { - t.Logf("pre-cleanup %s failed: %v (not a problem)", rr.Args, err) + t.Logf("pre-cleanup %s failed: %v (not a problem)", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "run", "--rm", "registry-test", "--restart=Never", "--image=busybox", "-it", "--", "sh", "-c", "wget --spider -S http://registry.kube-system.svc.cluster.local")) if err != nil { - t.Errorf("failed to hit registry.kube-system.svc.cluster.local. args %q failed: %v", rr.Args, err) + t.Errorf("failed to hit registry.kube-system.svc.cluster.local. 
args %q failed: %v", rr.Command(), err) } want := "HTTP/1.1 200" if !strings.Contains(rr.Stdout.String(), want) { @@ -176,10 +176,10 @@ func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) { // Test from outside the cluster rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ip")) if err != nil { - t.Fatalf("failed run minikube ip. args %q : %v", rr.Args, err) + t.Fatalf("failed run minikube ip. args %q : %v", rr.Command(), err) } if rr.Stderr.String() != "" { - t.Errorf("expected stderr to be -empty- but got: *%q* . args %q", rr.Stderr, rr.Args) + t.Errorf("expected stderr to be -empty- but got: *%q* . args %q", rr.Stderr, rr.Command()) } endpoint := fmt.Sprintf("http://%s:%d", strings.TrimSpace(rr.Stdout.String()), 5000) @@ -205,7 +205,7 @@ func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) { rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "registry", "--alsologtostderr", "-v=1")) if err != nil { - t.Errorf("failed to disable registry addon. args %q: %v", rr.Args, err) + t.Errorf("failed to disable registry addon. 
args %q: %v", rr.Command(), err) } } @@ -232,10 +232,10 @@ func validateMetricsServerAddon(ctx context.Context, t *testing.T, profile strin return err } if rr.Stderr.String() != "" { - t.Logf("%v: unexpected stderr: %s", rr.Args, rr.Stderr) + t.Logf("%v: unexpected stderr: %s", rr.Command(), rr.Stderr) } if !strings.Contains(rr.Stdout.String(), want) { - return fmt.Errorf("%v stdout = %q, want %q", rr.Args, rr.Stdout, want) + return fmt.Errorf("%v stdout = %q, want %q", rr.Command(), rr.Stdout, want) } return nil } @@ -247,7 +247,7 @@ func validateMetricsServerAddon(ctx context.Context, t *testing.T, profile strin rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "metrics-server", "--alsologtostderr", "-v=1")) if err != nil { - t.Errorf("failed to disable metrics-server addon: args %q: %v", rr.Args, err) + t.Errorf("failed to disable metrics-server addon: args %q: %v", rr.Command(), err) } } @@ -283,10 +283,10 @@ func validateHelmTillerAddon(ctx context.Context, t *testing.T, profile string) return err } if rr.Stderr.String() != "" { - t.Logf("%v: unexpected stderr: %s", rr.Args, rr.Stderr) + t.Logf("%v: unexpected stderr: %s", rr.Command(), rr.Stderr) } if !strings.Contains(rr.Stdout.String(), want) { - return fmt.Errorf("%v stdout = %q, want %q", rr.Args, rr.Stdout, want) + return fmt.Errorf("%v stdout = %q, want %q", rr.Command(), rr.Stdout, want) } return nil } @@ -297,6 +297,6 @@ func validateHelmTillerAddon(ctx context.Context, t *testing.T, profile string) rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "helm-tiller", "--alsologtostderr", "-v=1")) if err != nil { - t.Errorf("failed disabling helm-tiller addon. arg %q.s %v", rr.Args, err) + t.Errorf("failed disabling helm-tiller addon. 
arg %q.s %v", rr.Command(), err) } } diff --git a/test/integration/docker_test.go b/test/integration/docker_test.go index da26300dd5..dd0a27de06 100644 --- a/test/integration/docker_test.go +++ b/test/integration/docker_test.go @@ -39,12 +39,12 @@ func TestDockerFlags(t *testing.T) { args := append([]string{"start", "-p", profile, "--cache-images=false", "--memory=1800", "--install-addons=false", "--wait=false", "--docker-env=FOO=BAR", "--docker-env=BAZ=BAT", "--docker-opt=debug", "--docker-opt=icc=true", "--alsologtostderr", "-v=5"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("failed to start minikube with args: %q : %v", rr.Args, err) + t.Errorf("failed to start minikube with args: %q : %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo systemctl show docker --property=Environment --no-pager")) if err != nil { - t.Errorf("failed to 'systemctl show docker' inside minikube. args %q: %v", rr.Args, err) + t.Errorf("failed to 'systemctl show docker' inside minikube. args %q: %v", rr.Command(), err) } for _, envVar := range []string{"FOO=BAR", "BAZ=BAT"} { @@ -55,7 +55,7 @@ func TestDockerFlags(t *testing.T) { rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo systemctl show docker --property=ExecStart --no-pager")) if err != nil { - t.Errorf("failed on the second 'systemctl show docker' inside minikube. args %q: %v", rr.Args, err) + t.Errorf("failed on the second 'systemctl show docker' inside minikube. 
args %q: %v", rr.Command(), err) } for _, opt := range []string{"--debug", "--icc=true"} { if !strings.Contains(rr.Stdout.String(), opt) { diff --git a/test/integration/fn_mount_cmd.go b/test/integration/fn_mount_cmd.go index 6e292af109..915262a833 100644 --- a/test/integration/fn_mount_cmd.go +++ b/test/integration/fn_mount_cmd.go @@ -117,7 +117,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { // Assert that we can access the mount without an error. Display for debugging. rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "--", "ls", "-la", guestMount)) if err != nil { - t.Fatalf("failed verifying accessing to the mount. args %q : %v", rr.Args, err) + t.Fatalf("failed verifying accessing to the mount. args %q : %v", rr.Command(), err) } t.Logf("guest mount directory contents\n%s", rr.Stdout) @@ -125,7 +125,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { tp := filepath.Join("/mount-9p", testMarker) rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "cat", tp)) if err != nil { - t.Fatalf("failed to verify the mount contains unique test marked: args %q: %v", rr.Args, err) + t.Fatalf("failed to verify the mount contains unique test marked: args %q: %v", rr.Command(), err) } if !bytes.Equal(rr.Stdout.Bytes(), wantFromTest) { @@ -136,7 +136,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { // Start the "busybox-mount" pod. rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "busybox-mount-test.yaml"))) if err != nil { - t.Fatalf("failed to 'kubectl replace' for busybox-mount-test. args %q : %v", rr.Args, err) + t.Fatalf("failed to 'kubectl replace' for busybox-mount-test. 
args %q : %v", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "default", "integration-test=busybox-mount", Minutes(4)); err != nil { @@ -157,7 +157,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { // test that file written from host was read in by the pod via cat /mount-9p/written-by-host; rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "logs", "busybox-mount")) if err != nil { - t.Errorf("failed to get kubectl logs for busybox-mount. args %q : %v", rr.Args, err) + t.Errorf("failed to get kubectl logs for busybox-mount. args %q : %v", rr.Command(), err) } if !bytes.Equal(rr.Stdout.Bytes(), wantFromTest) { t.Errorf("busybox-mount logs = %q, want %q", rr.Stdout.Bytes(), wantFromTest) @@ -169,7 +169,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { // test that file written from host was read in by the pod via cat /mount-9p/fromhost; rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "stat", gp)) if err != nil { - t.Errorf("failed to stat the file %q iniside minikube : args %q: %v", gp, rr.Args, err) + t.Errorf("failed to stat the file %q iniside minikube : args %q: %v", gp, rr.Command(), err) } if runtime.GOOS == "windows" { diff --git a/test/integration/fn_pvc.go b/test/integration/fn_pvc.go index c387a1750b..9cca92cc47 100644 --- a/test/integration/fn_pvc.go +++ b/test/integration/fn_pvc.go @@ -64,7 +64,7 @@ func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile st // Now create a testpvc rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "apply", "-f", filepath.Join(*testdataDir, "pvc.yaml"))) if err != nil { - t.Fatalf("kubectl apply pvc.yaml failed: args %q: %v", rr.Args, err) + t.Fatalf("kubectl apply pvc.yaml failed: args %q: %v", rr.Command(), err) } checkStoragePhase := func() error { diff --git a/test/integration/fn_tunnel_cmd.go b/test/integration/fn_tunnel_cmd.go index 
dffb247fb4..8f43a9a5fd 100644 --- a/test/integration/fn_tunnel_cmd.go +++ b/test/integration/fn_tunnel_cmd.go @@ -69,7 +69,7 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { // Start the "nginx" pod. rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "apply", "-f", filepath.Join(*testdataDir, "testsvc.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "default", "run=nginx-svc", Minutes(4)); err != nil { t.Fatalf("wait: %v", err) @@ -97,7 +97,7 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "svc", "nginx-svc")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } t.Logf("failed to kubectl get svc nginx-svc:\n%s", rr.Stdout) } diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index aa9ac58eb0..a5ee68aa73 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -137,7 +137,7 @@ func TestFunctional(t *testing.T) { func validateNodeLabels(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "nodes", "--output=go-template", "--template='{{range $k, $v := (index .items 0).metadata.labels}}{{$k}} {{end}}'")) if err != nil { - t.Errorf("failed to 'kubectl get nodes' with args %q: %v", rr.Args, err) + t.Errorf("failed to 'kubectl get nodes' with args %q: %v", rr.Command(), err) } expectedLabels := []string{"minikube.k8s.io/commit", "minikube.k8s.io/version", "minikube.k8s.io/updated_at", "minikube.k8s.io/name"} for _, el := range expectedLabels { @@ -167,7 +167,7 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) { c = exec.CommandContext(mctx, "/bin/bash", "-c", "eval 
$("+Target()+" -p "+profile+" docker-env) && docker images") rr, err = Run(t, c) if err != nil { - t.Fatalf("failed to run minikube docker-env. args %q : %v ", rr.Args, err) + t.Fatalf("failed to run minikube docker-env. args %q : %v ", rr.Command(), err) } expectedImgInside := "gcr.io/k8s-minikube/storage-provisioner" @@ -192,7 +192,7 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) { c.Env = env rr, err := Run(t, c) if err != nil { - t.Errorf("failed minikube start. args %q: %v", rr.Args, err) + t.Errorf("failed minikube start. args %q: %v", rr.Command(), err) } want := "Found network options:" @@ -210,7 +210,7 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) { func validateKubeContext(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "config", "current-context")) if err != nil { - t.Errorf("failed to get current-context. args %q : %v", rr.Args, err) + t.Errorf("failed to get current-context. 
args %q : %v", rr.Command(), err) } if !strings.Contains(rr.Stdout.String(), profile) { t.Errorf("expected current-context = %q, but got *%q*", profile, rr.Stdout.String()) @@ -221,7 +221,7 @@ func validateKubeContext(ctx context.Context, t *testing.T, profile string) { func validateKubectlGetPods(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "po", "-A")) if err != nil { - t.Errorf("failed to get kubectl pods: args %q : %v", rr.Args, err) + t.Errorf("failed to get kubectl pods: args %q : %v", rr.Command(), err) } if rr.Stderr.String() != "" { t.Errorf("expected stderr to be empty but got *%q*: args %q", rr.Stderr, rr.Command()) @@ -237,7 +237,7 @@ func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string) kubectlArgs := []string{"-p", profile, "kubectl", "--", "--context", profile, "get", "pods"} rr, err := Run(t, exec.CommandContext(ctx, Target(), kubectlArgs...)) if err != nil { - t.Fatalf("failed to get pods. args %q: %v", rr.Args, err) + t.Fatalf("failed to get pods. args %q: %v", rr.Command(), err) } } @@ -245,12 +245,12 @@ func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string) func validateComponentHealth(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "cs", "-o=json")) if err != nil { - t.Fatalf("failed to get components. args %q: %v", rr.Args, err) + t.Fatalf("failed to get components. 
args %q: %v", rr.Command(), err) } cs := api.ComponentStatusList{} d := json.NewDecoder(bytes.NewReader(rr.Stdout.Bytes())) if err := d.Decode(&cs); err != nil { - t.Fatalf("failed to decode kubectl json output: args %q : %v", rr.Args, err) + t.Fatalf("failed to decode kubectl json output: args %q : %v", rr.Command(), err) } for _, i := range cs.Items { @@ -270,41 +270,41 @@ func validateComponentHealth(ctx context.Context, t *testing.T, profile string) func validateStatusCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) if err != nil { - t.Errorf("failed to run minikube status. args %q : %v", rr.Args, err) + t.Errorf("failed to run minikube status. args %q : %v", rr.Command(), err) } // Custom format rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-f", "host:{{.Host}},kublet:{{.Kubelet}},apiserver:{{.APIServer}},kubeconfig:{{.Kubeconfig}}")) if err != nil { - t.Errorf("failed to run minikube status with custom format: args %q: %v", rr.Args, err) + t.Errorf("failed to run minikube status with custom format: args %q: %v", rr.Command(), err) } re := `host:([A-z]+),kublet:([A-z]+),apiserver:([A-z]+),kubeconfig:([A-z]+)` match, _ := regexp.MatchString(re, rr.Stdout.String()) if !match { - t.Errorf("failed to match regex %q for minikube status with custom format. args %q. output %q", re, rr.Args, rr.Output()) + t.Errorf("failed to match regex %q for minikube status with custom format. args %q. output %q", re, rr.Command(), rr.Output()) } // Json output rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-o", "json")) if err != nil { - t.Errorf("failed to run minikube status with json output. args %q : %v", rr.Args, err) + t.Errorf("failed to run minikube status with json output. 
args %q : %v", rr.Command(), err) } var jsonObject map[string]interface{} err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject) if err != nil { - t.Errorf("failed to decode json from minikube status. args %q. %v", rr.Args, err) + t.Errorf("failed to decode json from minikube status. args %q. %v", rr.Command(), err) } if _, ok := jsonObject["Host"]; !ok { - t.Errorf("%q failed: %v. Missing key %s in json object", rr.Args, err, "Host") + t.Errorf("%q failed: %v. Missing key %s in json object", rr.Command(), err, "Host") } if _, ok := jsonObject["Kubelet"]; !ok { - t.Errorf("%q failed: %v. Missing key %s in json object", rr.Args, err, "Kubelet") + t.Errorf("%q failed: %v. Missing key %s in json object", rr.Command(), err, "Kubelet") } if _, ok := jsonObject["APIServer"]; !ok { - t.Errorf("%q failed: %v. Missing key %s in json object", rr.Args, err, "APIServer") + t.Errorf("%q failed: %v. Missing key %s in json object", rr.Command(), err, "APIServer") } if _, ok := jsonObject["Kubeconfig"]; !ok { - t.Errorf("%q failed: %v. Missing key %s in json object", rr.Args, err, "Kubeconfig") + t.Errorf("%q failed: %v. 
Missing key %s in json object", rr.Command(), err, "Kubeconfig") } } @@ -350,7 +350,7 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) { func validateDNS(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "busybox.yaml"))) if err != nil { - t.Fatalf("failed to kubectl replace busybox : args %q: %v", rr.Args, err) + t.Fatalf("failed to kubectl replace busybox : args %q: %v", rr.Command(), err) } names, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", Minutes(4)) @@ -410,21 +410,21 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { for _, img := range []string{"busybox:latest", "busybox:1.28.4-glibc", "k8s.gcr.io/pause:latest"} { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img)) if err != nil { - t.Errorf("failed to cache add image %q. args %q err %v", img, rr.Args, err) + t.Errorf("failed to cache add image %q. args %q err %v", img, rr.Command(), err) } } }) t.Run("delete_busybox:1.28.4-glibc", func(t *testing.T) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", "busybox:1.28.4-glibc")) if err != nil { - t.Errorf("failed to delete image busybox:1.28.4-glibc from cache. args %q: %v", rr.Args, err) + t.Errorf("failed to delete image busybox:1.28.4-glibc from cache. args %q: %v", rr.Command(), err) } }) t.Run("list", func(t *testing.T) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "list")) if err != nil { - t.Errorf("failed to do cache list. args %q: %v", rr.Args, err) + t.Errorf("failed to do cache list. 
args %q: %v", rr.Command(), err) } if !strings.Contains(rr.Output(), "k8s.gcr.io/pause") { t.Errorf("expected 'cache list' output to include 'k8s.gcr.io/pause' but got:\n ***%q***", rr.Output()) @@ -491,7 +491,7 @@ func validateConfigCmd(ctx context.Context, t *testing.T, profile string) { args := append([]string{"-p", profile, "config"}, tc.args...) rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil && tc.wantErr == "" { - t.Errorf("failed to config minikube. args %q : %v", rr.Args, err) + t.Errorf("failed to config minikube. args %q : %v", rr.Command(), err) } got := strings.TrimSpace(rr.Stdout.String()) @@ -509,7 +509,7 @@ func validateConfigCmd(ctx context.Context, t *testing.T, profile string) { func validateLogsCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "logs")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } for _, word := range []string{"Docker", "apiserver", "Linux", "kubelet"} { if !strings.Contains(rr.Stdout.String(), word) { @@ -525,16 +525,16 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { nonexistentProfile := "lis" rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", nonexistentProfile)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } var profileJSON map[string][]map[string]interface{} err = json.Unmarshal(rr.Stdout.Bytes(), &profileJSON) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } for profileK := range profileJSON { for _, p := range profileJSON[profileK] { @@ -550,7 +550,7 @@ func validateProfileCmd(ctx context.Context, t 
*testing.T, profile string) { // List profiles rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list")) if err != nil { - t.Errorf("failed to list profiles: args %q : %v", rr.Args, err) + t.Errorf("failed to list profiles: args %q : %v", rr.Command(), err) } // Table output @@ -564,7 +564,7 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { } } if !profileExists { - t.Errorf("expected 'profile list' output to include %q but got *%q*. args: %q", profile, rr.Stdout.String(), rr.Args) + t.Errorf("expected 'profile list' output to include %q but got *%q*. args: %q", profile, rr.Stdout.String(), rr.Command()) } }) @@ -572,12 +572,12 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { // Json output rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json")) if err != nil { - t.Errorf("failed to list profiles with json format. args %q: %v", rr.Args, err) + t.Errorf("failed to list profiles with json format. args %q: %v", rr.Command(), err) } var jsonObject map[string][]map[string]interface{} err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject) if err != nil { - t.Errorf("failed to decode json from profile list: args %q: %v", rr.Args, err) + t.Errorf("failed to decode json from profile list: args %q: %v", rr.Command(), err) } validProfiles := jsonObject["valid"] profileExists := false @@ -588,7 +588,7 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { } } if !profileExists { - t.Errorf("expected the json of 'profile list' to include %q but got *%q*. args: %q", profile, rr.Stdout.String(), rr.Args) + t.Errorf("expected the json of 'profile list' to include %q but got *%q*. 
args: %q", profile, rr.Stdout.String(), rr.Command()) } }) @@ -598,11 +598,11 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node", "--image=gcr.io/hello-minikube-zero-install/hello-node")) if err != nil { - t.Logf("%q failed: %v (may not be an error).", rr.Args, err) + t.Logf("%q failed: %v (may not be an error).", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "expose", "deployment", "hello-node", "--type=NodePort", "--port=8080")) if err != nil { - t.Logf("%q failed: %v (may not be an error)", rr.Args, err) + t.Logf("%q failed: %v (may not be an error)", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "default", "app=hello-node", Minutes(10)); err != nil { @@ -611,7 +611,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "list")) if err != nil { - t.Errorf("failed to do service list. args %q : %v", rr.Args, err) + t.Errorf("failed to do service list. args %q : %v", rr.Command(), err) } if !strings.Contains(rr.Stdout.String(), "hello-node") { t.Errorf("expected 'service list' to contain *hello-node* but got -%q-", rr.Stdout.String()) @@ -620,7 +620,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { // Test --https --url mode rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "--namespace=default", "--https", "--url", "hello-node")) if err != nil { - t.Fatalf("failed to get service url. args %q : %v", rr.Args, err) + t.Fatalf("failed to get service url. 
args %q : %v", rr.Command(), err) } if rr.Stderr.String() != "" { t.Errorf("expected stderr to be empty but got *%q*", rr.Stderr) @@ -638,16 +638,16 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { // Test --format=IP rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "hello-node", "--url", "--format={{.IP}}")) if err != nil { - t.Errorf("failed to get service url with custom format. args %q: %v", rr.Args, err) + t.Errorf("failed to get service url with custom format. args %q: %v", rr.Command(), err) } if strings.TrimSpace(rr.Stdout.String()) != u.Hostname() { - t.Errorf("expected 'service --format={{.IP}}' output to be -%q- but got *%q* . args %q.", u.Hostname(), rr.Stdout.String(), rr.Args) + t.Errorf("expected 'service --format={{.IP}}' output to be -%q- but got *%q* . args %q.", u.Hostname(), rr.Stdout.String(), rr.Command()) } // Test a regular URLminikube rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "hello-node", "--url")) if err != nil { - t.Errorf("failed to get service url. args: %q: %v", rr.Args, err) + t.Errorf("failed to get service url. args: %q: %v", rr.Command(), err) } endpoint = strings.TrimSpace(rr.Stdout.String()) @@ -674,7 +674,7 @@ func validateAddonsCmd(ctx context.Context, t *testing.T, profile string) { // Table output rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "list")) if err != nil { - t.Errorf("failed to do addon list: args %q : %v", rr.Args, err) + t.Errorf("failed to do addon list: args %q : %v", rr.Command(), err) } for _, a := range []string{"dashboard", "ingress", "ingress-dns"} { if !strings.Contains(rr.Output(), a) { @@ -685,7 +685,7 @@ func validateAddonsCmd(ctx context.Context, t *testing.T, profile string) { // Json output rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "list", "-o", "json")) if err != nil { - t.Errorf("failed to do addon list with json output. 
args %q: %v", rr.Args, err) + t.Errorf("failed to do addon list with json output. args %q: %v", rr.Command(), err) } var jsonObject map[string]interface{} err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject) @@ -702,10 +702,10 @@ func validateSSHCmd(ctx context.Context, t *testing.T, profile string) { want := "hello\n" rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("echo hello"))) if err != nil { - t.Errorf("failed to run an ssh command. args %q : %v", rr.Args, err) + t.Errorf("failed to run an ssh command. args %q : %v", rr.Command(), err) } if rr.Stdout.String() != want { - t.Errorf("expected minikube ssh command output to be -%q- but got *%q*. args %q", want, rr.Stdout.String(), rr.Args) + t.Errorf("expected minikube ssh command output to be -%q- but got *%q*. args %q", want, rr.Stdout.String(), rr.Command()) } } @@ -713,7 +713,7 @@ func validateSSHCmd(ctx context.Context, t *testing.T, profile string) { func validateMySQL(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "mysql.yaml"))) if err != nil { - t.Fatalf("failed to kubectl replace mysql: args %q failed: %v", rr.Args, err) + t.Fatalf("failed to kubectl replace mysql: args %q failed: %v", rr.Command(), err) } names, err := PodWait(ctx, t, profile, "default", "app=mysql", Minutes(10)) @@ -776,7 +776,7 @@ func validateFileSync(ctx context.Context, t *testing.T, profile string) { t.Logf("Checking for existence of %s within VM", vp) rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("cat %s", vp))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } got := rr.Stdout.String() t.Logf("file sync test content: %s", got) @@ -813,7 +813,7 @@ func validateCertSync(ctx context.Context, t *testing.T, profile string) { t.Logf("Checking for existence of %s 
within VM", vp) rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("cat %s", vp))) if err != nil { - t.Errorf("failed to check existence of %q inside minikube. args %q: %v", vp, rr.Args, err) + t.Errorf("failed to check existence of %q inside minikube. args %q: %v", vp, rr.Command(), err) } // Strip carriage returned by ssh @@ -828,7 +828,7 @@ func validateCertSync(ctx context.Context, t *testing.T, profile string) { func validateUpdateContextCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "update-context", "--alsologtostderr", "-v=2")) if err != nil { - t.Errorf("failed to run minikube update-context: args %q: %v", rr.Args, err) + t.Errorf("failed to run minikube update-context: args %q: %v", rr.Command(), err) } want := []byte("IP was already correctly configured") diff --git a/test/integration/guest_env_test.go b/test/integration/guest_env_test.go index e59284df60..201d188ec2 100644 --- a/test/integration/guest_env_test.go +++ b/test/integration/guest_env_test.go @@ -38,7 +38,7 @@ func TestGuestEnvironment(t *testing.T) { args := append([]string{"start", "-p", profile, "--install-addons=false", "--memory=1800", "--wait=false"}, StartArgs()...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("failed to start minikube: args %q: %v", rr.Args, err) + t.Errorf("failed to start minikube: args %q: %v", rr.Command(), err) } // Run as a group so that our defer doesn't happen as tests are runnings @@ -49,7 +49,7 @@ func TestGuestEnvironment(t *testing.T) { t.Parallel() rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("which %s", pkg))) if err != nil { - t.Errorf("failed to verify existance of %q binary : args %q: %v", pkg, rr.Args, err) + t.Errorf("failed to verify existance of %q binary : args %q: %v", pkg, rr.Command(), err) } }) } @@ -70,7 +70,7 @@ func TestGuestEnvironment(t *testing.T) { t.Parallel() rr, err := Run(t, exec.CommandContext(ctx, Targt(), "-p", profile, "ssh", fmt.Sprintf("df -t ext4 %s | grep %s", mount, mount))) if err != nil { - t.Errorf("failed to verify existance of %q mount. args %q: %v", mount, rr.Args, err) + t.Errorf("failed to verify existance of %q mount. args %q: %v", mount, rr.Command(), err) } }) } diff --git a/test/integration/gvisor_addon_test.go b/test/integration/gvisor_addon_test.go index 6e20c249c6..d5744eeafe 100644 --- a/test/integration/gvisor_addon_test.go +++ b/test/integration/gvisor_addon_test.go @@ -50,18 +50,18 @@ func TestGvisorAddon(t *testing.T) { startArgs := append([]string{"start", "-p", profile, "--memory=2200", "--container-runtime=containerd", "--docker-opt", "containerd=/var/run/containerd/containerd.sock"}, StartArgs()...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("failed to start minikube: args %q: %v", rr.Args, err) + t.Fatalf("failed to start minikube: args %q: %v", rr.Command(), err) } // If it exists, include a locally built gvisor image rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", "gcr.io/k8s-minikube/gvisor-addon:2")) if err != nil { - t.Logf("%s failed: %v (won't test local image)", rr.Args, err) + t.Logf("%s failed: %v (won't test local image)", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "enable", "gvisor")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "kube-system", "kubernetes.io/minikube-addons=gvisor", Minutes(4)); err != nil { @@ -71,12 +71,12 @@ func TestGvisorAddon(t *testing.T) { // Create an untrusted workload rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-untrusted.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } // Create gvisor workload rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-gvisor.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "default", "run=nginx,untrusted=true", Minutes(4)); err != nil { @@ -89,12 +89,12 @@ func TestGvisorAddon(t *testing.T) { // Ensure that workloads survive a restart rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile)) if err != nil { - t.Fatalf("faild stopping minikube. args %q : %v", rr.Args, err) + t.Fatalf("faild stopping minikube. 
args %q : %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("failed starting minikube after a stop. args %q, %v", rr.Args, err) + t.Fatalf("failed starting minikube after a stop. args %q, %v", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "kube-system", "kubernetes.io/minikube-addons=gvisor", Minutes(4)); err != nil { t.Errorf("failed waiting for 'gvisor controller' pod : %v", err) diff --git a/test/integration/none_test.go b/test/integration/none_test.go index 873465d5ef..ed77814dee 100644 --- a/test/integration/none_test.go +++ b/test/integration/none_test.go @@ -46,22 +46,22 @@ func TestChangeNoneUser(t *testing.T) { startArgs := append([]string{"CHANGE_MINIKUBE_NONE_USER=true", Target(), "start", "--wait=false"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, "/usr/bin/env", startArgs...)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "delete")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, "/usr/bin/env", startArgs...)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "status")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } username := os.Getenv("SUDO_USER") diff --git a/test/integration/start_stop_delete_test.go b/test/integration/start_stop_delete_test.go index 21ca1d57ed..dd106484c9 100644 --- a/test/integration/start_stop_delete_test.go +++ b/test/integration/start_stop_delete_test.go @@ -92,7 +92,7 @@ func TestStartStop(t *testing.T) { rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("failed starting minikube -first start-. 
args %q: %v", rr.Args, err) + t.Fatalf("failed starting minikube -first start-. args %q: %v", rr.Command(), err) } if !strings.Contains(tc.name, "cni") { @@ -101,7 +101,7 @@ func TestStartStop(t *testing.T) { rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile, "--alsologtostderr", "-v=3")) if err != nil { - t.Errorf("failed stopping minikube - first stop-. args %q : %v", rr.Args, err) + t.Errorf("failed stopping minikube - first stop-. args %q : %v", rr.Command(), err) } // The none driver never really stops @@ -115,13 +115,13 @@ func TestStartStop(t *testing.T) { // Enable an addon to assert it comes up afterwards rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile)) if err != nil { - t.Errorf("failed to enable an addon post-stop. args %q: %v", rr.Args, err) + t.Errorf("failed to enable an addon post-stop. args %q: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { // Explicit fatal so that failures don't move directly to deletion - t.Fatalf("failed to start minikube post-stop. args %q: %v", rr.Args, err) + t.Fatalf("failed to start minikube post-stop. 
args %q: %v", rr.Command(), err) } if strings.Contains(tc.name, "cni") { @@ -150,7 +150,7 @@ func TestStartStop(t *testing.T) { // Normally handled by cleanuprofile, but not fatal there rr, err = Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile)) if err != nil { - t.Errorf("failed to clean up: args %q: %v", rr.Args, err) + t.Errorf("failed to clean up: args %q: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "config", "get-contexts", profile)) @@ -182,14 +182,14 @@ func TestStartStopWithPreload(t *testing.T) { rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } // Now, pull the busybox image into the VMs docker daemon image := "busybox" rr, err = Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "--", "docker", "pull", image)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } // Restart minikube with v1.17.3, which has a preloaded tarball @@ -199,11 +199,11 @@ func TestStartStopWithPreload(t *testing.T) { startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", k8sVersion)) rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "--", "docker", "images")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } if !strings.Contains(rr.Output(), image) { t.Fatalf("Expected to find %s in output of `docker images`, instead got %s", image, rr.Output()) @@ -217,7 +217,7 @@ func testPodScheduling(ctx context.Context, t *testing.T, profile string) { // schedule a pod to assert persistence rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", 
filepath.Join(*testdataDir, "busybox.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } // 8 minutes, because 4 is not enough for images to pull in all cases. @@ -250,7 +250,7 @@ func testPulledImages(ctx context.Context, t *testing.T, profile string, version rr, err := Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "sudo crictl images -o json")) if err != nil { - t.Errorf("failed tp get images inside minikube. args %q: %v", rr.Args, err) + t.Errorf("failed tp get images inside minikube. args %q: %v", rr.Command(), err) } jv := map[string][]struct { Tags []string `json:"repoTags"` @@ -293,7 +293,7 @@ func testPause(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "pause", "-p", profile, "--alsologtostderr", "-v=1")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } got := Status(ctx, t, Target(), profile, "APIServer") @@ -308,7 +308,7 @@ func testPause(ctx context.Context, t *testing.T, profile string) { rr, err = Run(t, exec.CommandContext(ctx, Target(), "unpause", "-p", profile, "--alsologtostderr", "-v=1")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } got = Status(ctx, t, Target(), profile, "APIServer") diff --git a/test/integration/version_upgrade_test.go b/test/integration/version_upgrade_test.go index 20d131d5ef..2eb4eaabb6 100644 --- a/test/integration/version_upgrade_test.go +++ b/test/integration/version_upgrade_test.go @@ -82,7 +82,7 @@ func TestVersionUpgrade(t *testing.T) { rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), "stop", "-p", profile)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), "-p", profile, "status", "--format={{.Host}}")) @@ -97,7 +97,7 @@ func TestVersionUpgrade(t 
*testing.T) { args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) rr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("failed to start minikube HEAD with newest k8s version. args: %s : %v", rr.Args, err) + t.Errorf("failed to start minikube HEAD with newest k8s version. args: %s : %v", rr.Command(), err) } s, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "version", "--output=json")) @@ -121,12 +121,12 @@ func TestVersionUpgrade(t *testing.T) { args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) if rr, err := Run(t, exec.CommandContext(ctx, tf.Name(), args...)); err == nil { - t.Fatalf("downgrading kubernetes should not be allowed. expected to see error but got %v for %q", err, rr.Args) + t.Fatalf("downgrading kubernetes should not be allowed. expected to see error but got %v for %q", err, rr.Command()) } args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) rr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("start and already started minikube failed. args: %q : %v", rr.Args, err) + t.Errorf("start and already started minikube failed. 
args: %q : %v", rr.Command(), err) } } From fffac252628281898b757119d3d88e0978c0deea Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 22:45:29 -0700 Subject: [PATCH 271/668] indent test outputs --- test/integration/helpers.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/test/integration/helpers.go b/test/integration/helpers.go index bac683ea87..02298b3a8e 100644 --- a/test/integration/helpers.go +++ b/test/integration/helpers.go @@ -63,14 +63,26 @@ func (rr RunResult) Command() string { return sb.String() } +// indentLines indents every line in a bytes.Buffer and returns it as string +func indentLines(b *bytes.Buffer) string { + scanner := bufio.NewScanner(b) + var lines string + for scanner.Scan() { + lines = lines + "\t" + scanner.Text() + "\n" + } + return lines +} + // Output returns human-readable output for an execution result func (rr RunResult) Output() string { var sb strings.Builder + if rr.Stdout.Len() > 0 { - sb.WriteString(fmt.Sprintf("-- stdout --\n%s\n-- /stdout --", rr.Stdout.Bytes())) + sb.WriteString(fmt.Sprintf("-- stdout --\n%s\n-- /stdout --", indentLines(rr.Stdout))) } + if rr.Stderr.Len() > 0 { - sb.WriteString(fmt.Sprintf("\n** stderr ** \n%s\n** /stderr **", rr.Stderr.Bytes())) + sb.WriteString(fmt.Sprintf("\n** stderr ** \n%s\n** /stderr **", indentLines(rr.Stderr))) } return sb.String() } From 2ced39c74561b3e327e11263817e3b71010ca5fa Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 22:54:43 -0700 Subject: [PATCH 272/668] add \n for stdout too --- test/integration/helpers.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/integration/helpers.go b/test/integration/helpers.go index 02298b3a8e..8ac021862a 100644 --- a/test/integration/helpers.go +++ b/test/integration/helpers.go @@ -76,11 +76,9 @@ func indentLines(b *bytes.Buffer) string { // Output returns human-readable output for an execution result func (rr RunResult) Output() string { var sb 
strings.Builder - if rr.Stdout.Len() > 0 { - sb.WriteString(fmt.Sprintf("-- stdout --\n%s\n-- /stdout --", indentLines(rr.Stdout))) + sb.WriteString(fmt.Sprintf("\n-- stdout --\n%s\n-- /stdout --", indentLines(rr.Stdout))) } - if rr.Stderr.Len() > 0 { sb.WriteString(fmt.Sprintf("\n** stderr ** \n%s\n** /stderr **", indentLines(rr.Stderr))) } From 0dc9c2af70f5df7024740ef515395ce41e4b6109 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 23:14:10 -0700 Subject: [PATCH 273/668] install kubectl on github action machines --- .github/workflows/main.yml | 48 +++++++++++++++++++++++++++++++++----- 1 file changed, 42 insertions(+), 6 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 4a0bd5b0aa..d5feef452d 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -81,13 +81,26 @@ jobs: GOPOGH_RESULT: "" SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-16.04 - steps: + steps: + - name: Install kubectl + shell: bash + run: | + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl - name: Docker Info shell: bash run: | - docker info || true + echo "--------------------------" docker version || true + echo "--------------------------" + docker info || true + echo "--------------------------" + docker system df || true + echo "--------------------------" + docker system info || true + echo "--------------------------" docker ps || true + echo "--------------------------" - name: Install lz4 shell: bash run: | @@ -157,6 +170,11 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 needs: [build_minikube] steps: + - name: Install kubectl + shell: bash + run: | + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl - name: Install lz4 shell: bash 
run: | @@ -165,9 +183,17 @@ jobs: - name: Docker Info shell: bash run: | - docker info || true + echo "--------------------------" docker version || true + echo "--------------------------" + docker info || true + echo "--------------------------" + docker system df || true + echo "--------------------------" + docker system info || true + echo "--------------------------" docker ps || true + echo "--------------------------" - name: Install gopogh shell: bash run: | @@ -232,6 +258,11 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-16.04 steps: + - name: Install kubectl + shell: bash + run: | + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl # conntrack is required for kubernetes 1.18 and higher # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon - name: Install tools for none @@ -304,6 +335,11 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-18.04 steps: + - name: Install kubectl + shell: bash + run: | + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl # conntrack is required for kubernetes 1.18 and higher # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon - name: Install tools for none @@ -376,11 +412,11 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-18.04 steps: - - name: Install lz4 + - name: Install kubectl shell: bash run: | - sudo apt-get update -qq - sudo apt-get -qq -y install liblz4-tool + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl - name: Install podman shell: bash run: | From 
9229b35da97211504477fb8356e07fdb36db200c Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 23:34:05 -0700 Subject: [PATCH 274/668] print kubectl version to make sure it is installed --- .github/workflows/main.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index d5feef452d..0fb9d83185 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -87,6 +87,7 @@ jobs: run: | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl + kubectl version - name: Docker Info shell: bash run: | @@ -175,6 +176,7 @@ jobs: run: | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl + kubectl version - name: Install lz4 shell: bash run: | @@ -263,6 +265,7 @@ jobs: run: | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl + kubectl version # conntrack is required for kubernetes 1.18 and higher # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon - name: Install tools for none @@ -340,6 +343,7 @@ jobs: run: | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl + kubectl version # conntrack is required for kubernetes 1.18 and higher # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon - name: Install tools for none @@ -417,6 +421,7 @@ jobs: run: | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl + kubectl version - name: Install podman shell: bash run: | From d2a7b8b748d10bd1ea64670d7853804e3504b76f Mon Sep 17 00:00:00 2001 From: Medya Gh 
Date: Wed, 25 Mar 2020 23:45:27 -0700 Subject: [PATCH 275/668] kubectl version --- .github/workflows/main.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 0fb9d83185..1ce020cbbe 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -87,7 +87,7 @@ jobs: run: | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl - kubectl version + kubectl version --client=true - name: Docker Info shell: bash run: | @@ -176,7 +176,7 @@ jobs: run: | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl - kubectl version + kubectl version --client=true - name: Install lz4 shell: bash run: | @@ -265,7 +265,7 @@ jobs: run: | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl - kubectl version + kubectl version --client=true # conntrack is required for kubernetes 1.18 and higher # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon - name: Install tools for none @@ -343,7 +343,7 @@ jobs: run: | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl - kubectl version + kubectl version --client=true # conntrack is required for kubernetes 1.18 and higher # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon - name: Install tools for none @@ -421,7 +421,7 @@ jobs: run: | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl - kubectl version + kubectl version --client=true - name: Install podman shell: bash run: | From 
1c8d5806825ecbaeeb752080fd89824450699815 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 01:14:07 -0700 Subject: [PATCH 276/668] add docs for docker driver --- .../en/docs/Reference/Drivers/docker.md | 21 ++++-------- .../en/docs/Reference/Drivers/hyperv.md | 1 - .../Drivers/includes/check_baremetal.inc | 7 ++++ .../Drivers/includes/check_container.inc | 9 ++++++ .../includes/check_virtualization_linux.inc | 11 +++++++ .../includes/check_virtualization_windows.inc | 19 +++++++++++ .../Drivers/includes/docker_usage.inc | 16 ++++++++++ .../content/en/docs/Reference/Drivers/kvm2.md | 5 +++ site/content/en/docs/Start/linux.md | 28 +++++++++------- site/content/en/docs/Start/macos.md | 3 ++ site/content/en/docs/Start/windows.md | 32 ++++++------------- 11 files changed, 102 insertions(+), 50 deletions(-) create mode 100644 site/content/en/docs/Reference/Drivers/includes/check_baremetal.inc create mode 100644 site/content/en/docs/Reference/Drivers/includes/check_container.inc create mode 100644 site/content/en/docs/Reference/Drivers/includes/check_virtualization_linux.inc create mode 100644 site/content/en/docs/Reference/Drivers/includes/check_virtualization_windows.inc create mode 100644 site/content/en/docs/Reference/Drivers/includes/docker_usage.inc diff --git a/site/content/en/docs/Reference/Drivers/docker.md b/site/content/en/docs/Reference/Drivers/docker.md index f44261fbad..2fcd751123 100644 --- a/site/content/en/docs/Reference/Drivers/docker.md +++ b/site/content/en/docs/Reference/Drivers/docker.md @@ -4,28 +4,19 @@ linkTitle: "docker" weight: 3 date: 2020-02-05 description: > - Docker driver (EXPERIMENTAL) + Docker driver --- ## Overview -The Docker driver is an experimental VM-free driver that ships with minikube v1.7. +The Docker driver is a VM-free driver. 
+ +{{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}} -This driver was inspired by the [kind project](https://kind.sigs.k8s.io/), and uses a modified version of its base image. ## Special features -No hypervisor required when run on Linux. +- Cross platform (linux, macos, windows) +- No hypervisor required when run on Linux. -## Limitations -As an experimental driver, not all commands are supported on all platforms. Notably: `mount,` `service`, `tunnel`, and others. Most of these limitations will be addressed by minikube v1.8 (March 2020) - -## Issues - -* [Full list of open 'kic-driver' issues](https://github.com/kubernetes/minikube/labels/co%2Fkic-driver) - -## Troubleshooting - -* Run `minikube start --alsologtostderr -v=1` to debug crashes -* If your docker is too slow on mac os try [Improving docker performance](https://docs.docker.com/docker-for-mac/osxfs-caching/) diff --git a/site/content/en/docs/Reference/Drivers/hyperv.md b/site/content/en/docs/Reference/Drivers/hyperv.md index 909f1e03f8..3595a2bb9d 100644 --- a/site/content/en/docs/Reference/Drivers/hyperv.md +++ b/site/content/en/docs/Reference/Drivers/hyperv.md @@ -7,7 +7,6 @@ date: 2018-08-05 description: > Microsoft Hyper-V driver --- - ## Overview Hyper-V is a native hypervisor built in to modern versions of Microsoft Windows. diff --git a/site/content/en/docs/Reference/Drivers/includes/check_baremetal.inc b/site/content/en/docs/Reference/Drivers/includes/check_baremetal.inc new file mode 100644 index 0000000000..da6797ff8c --- /dev/null +++ b/site/content/en/docs/Reference/Drivers/includes/check_baremetal.inc @@ -0,0 +1,7 @@ +To use baremetal driver (none driver). verify that your operating system is Linux and also have 'systemd' installed. + +```shell +pidof systemd && echo "yes" || echo "no" +``` +If the above command outputs "no": +Your system is not suitable for none driver. 
\ No newline at end of file diff --git a/site/content/en/docs/Reference/Drivers/includes/check_container.inc b/site/content/en/docs/Reference/Drivers/includes/check_container.inc new file mode 100644 index 0000000000..c189d8a2ac --- /dev/null +++ b/site/content/en/docs/Reference/Drivers/includes/check_container.inc @@ -0,0 +1,9 @@ +To use container drivers, verify that your system has either have 'docker' or 'podman' installed. +```shell +docker version +``` +or + +```shell +podman version +``` \ No newline at end of file diff --git a/site/content/en/docs/Reference/Drivers/includes/check_virtualization_linux.inc b/site/content/en/docs/Reference/Drivers/includes/check_virtualization_linux.inc new file mode 100644 index 0000000000..3f60068016 --- /dev/null +++ b/site/content/en/docs/Reference/Drivers/includes/check_virtualization_linux.inc @@ -0,0 +1,11 @@ +To use VM drivers, verify that your system has virtualization support enabled: + +```shell +egrep -q 'vmx|svm' /proc/cpuinfo && echo yes || echo no +``` + +If the above command outputs "no": + +- If you are running within a VM, your hypervisor does not allow nested virtualization. You will need to use the *None (bare-metal)* driver +- If you are running on a physical machine, ensure that your BIOS has hardware virtualization enabled + diff --git a/site/content/en/docs/Reference/Drivers/includes/check_virtualization_windows.inc b/site/content/en/docs/Reference/Drivers/includes/check_virtualization_windows.inc new file mode 100644 index 0000000000..14812b61ec --- /dev/null +++ b/site/content/en/docs/Reference/Drivers/includes/check_virtualization_windows.inc @@ -0,0 +1,19 @@ +To check if virtualization is supported, run the following command on your Windows terminal or command prompt. 
+ +```shell +systeminfo +``` +If you see the following output, virtualization is supported: + +```shell +Hyper-V Requirements: VM Monitor Mode Extensions: Yes + Virtualization Enabled In Firmware: Yes + Second Level Address Translation: Yes + Data Execution Prevention Available: Yes +``` + +If you see the following output, your system already has a Hypervisor installed and you can skip the next step. + +```shell +Hyper-V Requirements: A hypervisor has been detected. +``` \ No newline at end of file diff --git a/site/content/en/docs/Reference/Drivers/includes/docker_usage.inc b/site/content/en/docs/Reference/Drivers/includes/docker_usage.inc new file mode 100644 index 0000000000..df96d517ec --- /dev/null +++ b/site/content/en/docs/Reference/Drivers/includes/docker_usage.inc @@ -0,0 +1,16 @@ +## Install Docker + +- [Docker Desktop](https://hub.docker.com/search?q=&type=edition&offering=community&sort=updated_at&order=desc) + +## Usage + +Start a cluster using the docker driver: + +```shell +minikube start --driver=docker +``` +To make docker the default driver: + +```shell +minikube config set driver docker +``` diff --git a/site/content/en/docs/Reference/Drivers/kvm2.md b/site/content/en/docs/Reference/Drivers/kvm2.md index df13a3f95d..7e7c80a081 100644 --- a/site/content/en/docs/Reference/Drivers/kvm2.md +++ b/site/content/en/docs/Reference/Drivers/kvm2.md @@ -8,12 +8,17 @@ description: > Linux KVM (Kernel-based Virtual Machine) driver --- + ## Overview [KVM (Kernel-based Virtual Machine)](https://www.linux-kvm.org/page/Main_Page) is a full virtualization solution for Linux on x86 hardware containing virtualization extensions. 
To work with KVM, minikube uses the [libvirt virtualization API](https://libvirt.org/) {{% readfile file="/docs/Reference/Drivers/includes/kvm2_usage.inc" %}} +## Check virtualization support + +{{% readfile file="/docs/Reference/Drivers/includes/virtualization_check_linux.inc" %}} + ## Special features The `minikube start` command supports 3 additional kvm specific flags: diff --git a/site/content/en/docs/Start/linux.md b/site/content/en/docs/Start/linux.md index b52b1690d8..8534cff5e2 100644 --- a/site/content/en/docs/Start/linux.md +++ b/site/content/en/docs/Start/linux.md @@ -39,28 +39,32 @@ curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-{{< la {{% /tab %}} {{% /tabs %}} -## Hypervisor Setup - -Verify that your system has virtualization support enabled: - -```shell -egrep -q 'vmx|svm' /proc/cpuinfo && echo yes || echo no -``` - -If the above command outputs "no": - -- If you are running within a VM, your hypervisor does not allow nested virtualization. You will need to use the *None (bare-metal)* driver -- If you are running on a physical machine, ensure that your BIOS has hardware virtualization enabled +## Driver Setup {{% tabs %}} +{{% tab "Docker" %}} +## Check container support +{{% readfile file="/docs/Reference/Drivers/includes/check_container.inc" %}} + +{{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}} +{{% /tab %}} {{% tab "KVM" %}} +## Check virtualization support +{{% readfile file="/docs/Reference/Drivers/includes/check_virtualization_linux.inc" %}} + {{% readfile file="/docs/Reference/Drivers/includes/kvm2_usage.inc" %}} {{% /tab %}} {{% tab "VirtualBox" %}} +## Check virtualization support +{{% readfile file="/docs/Reference/Drivers/includes/check_virtualization_linux.inc" %}} + {{% readfile file="/docs/Reference/Drivers/includes/virtualbox_usage.inc" %}} {{% /tab %}} {{% tab "None (bare-metal)" %}} +## Check baremetal support +{{% readfile 
file="/docs/Reference/Drivers/includes/check_baremetal_linux.inc" %}} + If you are already running minikube from inside a VM, it is possible to skip the creation of an additional VM layer by using the `none` driver. {{% readfile file="/docs/Reference/Drivers/includes/none_usage.inc" %}} diff --git a/site/content/en/docs/Start/macos.md b/site/content/en/docs/Start/macos.md index 3c41e3a9b9..1051d8a1a1 100644 --- a/site/content/en/docs/Start/macos.md +++ b/site/content/en/docs/Start/macos.md @@ -50,6 +50,9 @@ brew upgrade minikube ## Hypervisor Setup {{% tabs %}} +{{% tab "Docker" %}} +{{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}} +{{% /tab %}} {{% tab "Hyperkit" %}} {{% readfile file="/docs/Reference/Drivers/includes/hyperkit_usage.inc" %}} {{% /tab %}} diff --git a/site/content/en/docs/Start/windows.md b/site/content/en/docs/Start/windows.md index 02ff403f99..2fe1d79a5f 100644 --- a/site/content/en/docs/Start/windows.md +++ b/site/content/en/docs/Start/windows.md @@ -7,8 +7,6 @@ weight: 3 ### Prerequisites * Windows 8 or above -* A hypervisor, such as Hyper-V or VirtualBox -* Hardware virtualization support must be enabled in BIOS * 4GB of RAM ### Installation @@ -30,33 +28,23 @@ After it has installed, close the current CLI session and reopen it. minikube sh {{% /tab %}} {{% /tabs %}} -## Hypervisor Setup -To check if virtualization is supported, run the following command on your Windows terminal or command prompt. - -```shell -systeminfo -``` -If you see the following output, virtualization is supported: - -```shell -Hyper-V Requirements: VM Monitor Mode Extensions: Yes - Virtualization Enabled In Firmware: Yes - Second Level Address Translation: Yes - Data Execution Prevention Available: Yes -``` - -If you see the following output, your system already has a Hypervisor installed and you can skip the next step. - -```shell -Hyper-V Requirements: A hypervisor has been detected. 
-``` {{% tabs %}} +{{% tab "Docker" %}} +{{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}} +{{% /tab %}} + {{% tab "Hyper-V" %}} +## Check Hypervisor +{{% readfile file="/docs/Reference/Drivers/includes/check_virtualization_windows.inc" %}} + {{% readfile file="/docs/Reference/Drivers/includes/hyperv_usage.inc" %}} {{% /tab %}} {{% tab "VirtualBox" %}} +## Check Hypervisor +{{% readfile file="/docs/Reference/Drivers/includes/check_virtualization_windows.inc" %}} + {{% readfile file="/docs/Reference/Drivers/includes/virtualbox_usage.inc" %}} {{% /tab %}} {{% /tabs %}} From 4f2d3de0552ca435d868f7aafbc15a3b37ae86dd Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 01:16:16 -0700 Subject: [PATCH 277/668] rename inc page --- .../includes/{check_baremetal.inc => check_baremetal_linux.inc} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename site/content/en/docs/Reference/Drivers/includes/{check_baremetal.inc => check_baremetal_linux.inc} (100%) diff --git a/site/content/en/docs/Reference/Drivers/includes/check_baremetal.inc b/site/content/en/docs/Reference/Drivers/includes/check_baremetal_linux.inc similarity index 100% rename from site/content/en/docs/Reference/Drivers/includes/check_baremetal.inc rename to site/content/en/docs/Reference/Drivers/includes/check_baremetal_linux.inc From 6006e1416f49096ac859d9a64cc8a19b12b0908a Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 01:17:52 -0700 Subject: [PATCH 278/668] rename broken link --- site/content/en/docs/Reference/Drivers/kvm2.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/site/content/en/docs/Reference/Drivers/kvm2.md b/site/content/en/docs/Reference/Drivers/kvm2.md index 7e7c80a081..52102bb83e 100644 --- a/site/content/en/docs/Reference/Drivers/kvm2.md +++ b/site/content/en/docs/Reference/Drivers/kvm2.md @@ -17,7 +17,7 @@ description: > ## Check virtualization support -{{% readfile 
file="/docs/Reference/Drivers/includes/virtualization_check_linux.inc" %}} +{{% readfile file="/docs/Reference/Drivers/includes/check_virtualization_linux.inc" %}} ## Special features From 7d9d62bef690f13cc4055f14c7c39952c0690283 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 01:21:30 -0700 Subject: [PATCH 279/668] fix the heading sizer --- .../docs/Reference/Drivers/includes/check_container.inc | 9 --------- site/content/en/docs/Start/includes/post_install.inc | 2 +- site/content/en/docs/Start/linux.md | 2 -- 3 files changed, 1 insertion(+), 12 deletions(-) delete mode 100644 site/content/en/docs/Reference/Drivers/includes/check_container.inc diff --git a/site/content/en/docs/Reference/Drivers/includes/check_container.inc b/site/content/en/docs/Reference/Drivers/includes/check_container.inc deleted file mode 100644 index c189d8a2ac..0000000000 --- a/site/content/en/docs/Reference/Drivers/includes/check_container.inc +++ /dev/null @@ -1,9 +0,0 @@ -To use container drivers, verify that your system has either have 'docker' or 'podman' installed. -```shell -docker version -``` -or - -```shell -podman version -``` \ No newline at end of file diff --git a/site/content/en/docs/Start/includes/post_install.inc b/site/content/en/docs/Start/includes/post_install.inc index 5f28d307cf..595eda57d8 100644 --- a/site/content/en/docs/Start/includes/post_install.inc +++ b/site/content/en/docs/Start/includes/post_install.inc @@ -1,4 +1,4 @@ -### Getting to know Kubernetes +## Getting to know Kubernetes Once started, you can use any regular Kubernetes command to interact with your minikube cluster. 
For example, you can see the pod states by running: diff --git a/site/content/en/docs/Start/linux.md b/site/content/en/docs/Start/linux.md index 8534cff5e2..454c7c4fe5 100644 --- a/site/content/en/docs/Start/linux.md +++ b/site/content/en/docs/Start/linux.md @@ -44,8 +44,6 @@ curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-{{< la {{% tabs %}} {{% tab "Docker" %}} ## Check container support -{{% readfile file="/docs/Reference/Drivers/includes/check_container.inc" %}} - {{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}} {{% /tab %}} From 6654d4e033fbfa357a2b66622d2d46fb9293e531 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 01:27:23 -0700 Subject: [PATCH 280/668] update docs about memory auto select --- site/content/en/docs/Start/includes/post_install.inc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/site/content/en/docs/Start/includes/post_install.inc b/site/content/en/docs/Start/includes/post_install.inc index 595eda57d8..03ed41bb61 100644 --- a/site/content/en/docs/Start/includes/post_install.inc +++ b/site/content/en/docs/Start/includes/post_install.inc @@ -6,16 +6,16 @@ Once started, you can use any regular Kubernetes command to interact with your m kubectl get po -A ``` -### Increasing memory allocation +## Increasing memory allocation -minikube only allocates 2GB of RAM by default, which is only enough for trivial deployments. For larger +minikube auto-selects the memory size based on your system up to 6000mb. For larger deployments, increase the memory allocation using the `--memory` flag, or make the setting persistent using: ```shell -minikube config set memory 4096 +minikube config set memory 8096 ``` -### Where to go next? +## Where to go next? Visit the [examples](/docs/examples) page to get an idea of what you can do with minikube. 
From 844e1a083dfdf387fb05d829e5b95aaf9419df38 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 01:41:20 -0700 Subject: [PATCH 281/668] add podman driver docs --- .../en/docs/Reference/Drivers/docker.md | 3 +-- .../Drivers/includes/podman_usage.inc | 16 ++++++++++++ .../en/docs/Reference/Drivers/podman.md | 26 +++++++++++++++++++ site/content/en/docs/Start/linux.md | 5 ++++ 4 files changed, 48 insertions(+), 2 deletions(-) create mode 100644 site/content/en/docs/Reference/Drivers/includes/podman_usage.inc create mode 100644 site/content/en/docs/Reference/Drivers/podman.md diff --git a/site/content/en/docs/Reference/Drivers/docker.md b/site/content/en/docs/Reference/Drivers/docker.md index 2fcd751123..17f3782504 100644 --- a/site/content/en/docs/Reference/Drivers/docker.md +++ b/site/content/en/docs/Reference/Drivers/docker.md @@ -9,13 +9,12 @@ description: > ## Overview -The Docker driver is a VM-free driver. +The Docker driver is the newest minikube driver, which runs kubernetes in container with full feature parity with VM minikube. {{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}} ## Special features - - Cross platform (linux, macos, windows) - No hypervisor required when run on Linux. 
diff --git a/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc b/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc new file mode 100644 index 0000000000..9327e9d901 --- /dev/null +++ b/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc @@ -0,0 +1,16 @@ +## Install Podman + +- [Podman](https://podman.io/getting-started/installation.html) + +## Usage + +Start a cluster using the docker driver: + +```shell +minikube start --driver=podman +``` +To make docker the default driver: + +```shell +minikube config set driver podman +``` diff --git a/site/content/en/docs/Reference/Drivers/podman.md b/site/content/en/docs/Reference/Drivers/podman.md new file mode 100644 index 0000000000..7ef9657c1d --- /dev/null +++ b/site/content/en/docs/Reference/Drivers/podman.md @@ -0,0 +1,26 @@ +--- +title: "podman" +linkTitle: "podman" +weight: 3 +date: 2020-03-26 +description: > + Podman driver +--- + +## Overview + +The podman driver is another kubernetes in container driver for minikube. simmilar to [docker](docs/reference/drivers/docker) driver. +podman driver is currently experimental. +and only supported on Linux and MacOs (with a remote podman server) + + +## try with CRI-O run time. 
+```shell +minikube start --driver=podman --container-runtime=cri-o +``` + + +{{% readfile file="/docs/Reference/Drivers/includes/podman_usage.inc" %}} + + + diff --git a/site/content/en/docs/Start/linux.md b/site/content/en/docs/Start/linux.md index 454c7c4fe5..1e4fd4a9b1 100644 --- a/site/content/en/docs/Start/linux.md +++ b/site/content/en/docs/Start/linux.md @@ -67,6 +67,11 @@ If you are already running minikube from inside a VM, it is possible to skip the {{% readfile file="/docs/Reference/Drivers/includes/none_usage.inc" %}} {{% /tab %}} +{{% tab "Podman (experimental)" %}} +{{% readfile file="/docs/Reference/Drivers/includes/podman_usage.inc" %}} +{{% /tab %}} + + {{% /tabs %}} {{% readfile file="/docs/Start/includes/post_install.inc" %}} From 2635b4b0d759fc2cf24cfd1e0915b776da677780 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 01:44:48 -0700 Subject: [PATCH 282/668] fix wording --- site/content/en/docs/Reference/Drivers/docker.md | 2 +- .../en/docs/Reference/Drivers/includes/podman_usage.inc | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/site/content/en/docs/Reference/Drivers/docker.md b/site/content/en/docs/Reference/Drivers/docker.md index 17f3782504..afd0a970b7 100644 --- a/site/content/en/docs/Reference/Drivers/docker.md +++ b/site/content/en/docs/Reference/Drivers/docker.md @@ -9,7 +9,7 @@ description: > ## Overview -The Docker driver is the newest minikube driver, which runs kubernetes in container with full feature parity with VM minikube. +The Docker driver is the newest minikube driver. which runs kubernetes in container VM-free ! with full feature parity with minikube in VM. 
{{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}} diff --git a/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc b/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc index 9327e9d901..76720f262a 100644 --- a/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc +++ b/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc @@ -1,3 +1,8 @@ +## experimental + +This is an experimental driver. please use it only for experimental reasons. +for a better kubernetes in container experience, use docker [driver](https://5e7c6ab90d754e000860cdfd--kubernetes-sigs-minikube.netlify.com/docs/reference/drivers/docker/). + ## Install Podman - [Podman](https://podman.io/getting-started/installation.html) From b7742190ab45a94204f90a7f95e1722d3ff64662 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 01:46:56 -0700 Subject: [PATCH 283/668] podman wording --- site/content/en/docs/Reference/Drivers/podman.md | 2 +- site/content/en/docs/Start/macos.md | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/site/content/en/docs/Reference/Drivers/podman.md b/site/content/en/docs/Reference/Drivers/podman.md index 7ef9657c1d..2b83cf3ea6 100644 --- a/site/content/en/docs/Reference/Drivers/podman.md +++ b/site/content/en/docs/Reference/Drivers/podman.md @@ -14,7 +14,7 @@ podman driver is currently experimental. and only supported on Linux and MacOs (with a remote podman server) -## try with CRI-O run time. +## Try it with CRI-O container runtime. 
```shell minikube start --driver=podman --container-runtime=cri-o ``` diff --git a/site/content/en/docs/Start/macos.md b/site/content/en/docs/Start/macos.md index 1051d8a1a1..b23a81cf13 100644 --- a/site/content/en/docs/Start/macos.md +++ b/site/content/en/docs/Start/macos.md @@ -65,6 +65,9 @@ brew upgrade minikube {{% tab "VMware" %}} {{% readfile file="/docs/Reference/Drivers/includes/vmware_macos_usage.inc" %}} {{% /tab %}} +{{% tab "Podman (experimental)" %}} +{{% readfile file="/docs/Reference/Drivers/includes/podman_usage.inc" %}} +{{% /tab %}} {{% /tabs %}} From 26bdbfe11de823abb7c5c7da3d2aefc3aadf2ff5 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 02:01:57 -0700 Subject: [PATCH 284/668] update references to docker podman --- site/content/en/_index.html | 8 +++++--- site/content/en/docs/Contributing/triage.md | 1 + .../en/docs/Reference/Drivers/includes/podman_usage.inc | 2 +- site/content/en/docs/Reference/Drivers/podman.md | 2 +- site/content/en/docs/Reference/disk_cache.md | 2 +- site/content/en/docs/Reference/persistent_volumes.md | 2 +- site/content/en/docs/Tutorials/continuous_integration.md | 8 ++++++-- site/content/en/docs/Tutorials/nvidia_gpu.md | 2 +- .../en/docs/Tutorials/untrusted_root_certificate.md | 6 +++--- 9 files changed, 20 insertions(+), 13 deletions(-) diff --git a/site/content/en/_index.html b/site/content/en/_index.html index 8687b84c25..05ade99efe 100644 --- a/site/content/en/_index.html +++ b/site/content/en/_index.html @@ -83,12 +83,14 @@ A single command away from reproducing your production environment, from the com {{% /blocks/feature %}} {{% blocks/feature icon="fa-thumbs-up" title="Cross-platform" %}} -- Bare-metal -- HyperKit -- Hyper-V - KVM +- Docker +- HyperKit +- Bare-metal - VirtualBox +- Hyper-V - VMware +- Podman {{% /blocks/feature %}} {{< /blocks/section >}} diff --git a/site/content/en/docs/Contributing/triage.md b/site/content/en/docs/Contributing/triage.md index 318312107b..93b3c403e5 100644 --- 
a/site/content/en/docs/Contributing/triage.md
+++ b/site/content/en/docs/Contributing/triage.md
@@ -62,6 +62,7 @@ If the issue is specific to an operating system, hypervisor, container, addon, o
   - `co/kvm2`
   - `co/none-driver`
   - `co/docker-driver`
+  - `co/podman-driver`
   - `co/virtualbox`
 
 **co/[kubernetes component]** - When the issue appears specific to a k8s component
diff --git a/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc b/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc
index 76720f262a..9962043165 100644
--- a/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc
+++ b/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc
@@ -1,7 +1,7 @@
 ## experimental
 
 This is an experimental driver. please use it only for experimental reasons.
-for a better kubernetes in container experience, use docker [driver](https://5e7c6ab90d754e000860cdfd--kubernetes-sigs-minikube.netlify.com/docs/reference/drivers/docker/).
+for a better kubernetes in container experience, use docker [driver](https://minikube.sigs.k8s.io/docs/reference/drivers/docker).
 
 ## Install Podman
 
diff --git a/site/content/en/docs/Reference/Drivers/podman.md b/site/content/en/docs/Reference/Drivers/podman.md
index 2b83cf3ea6..17425b1e58 100644
--- a/site/content/en/docs/Reference/Drivers/podman.md
+++ b/site/content/en/docs/Reference/Drivers/podman.md
@@ -9,7 +9,7 @@ description: >
 ## Overview
 
-The podman driver is another kubernetes in container driver for minikube. simmilar to [docker](docs/reference/drivers/docker) driver.
+The podman driver is another kubernetes in container driver for minikube. similar to [docker](https://minikube.sigs.k8s.io/docs/reference/drivers/docker/) driver.
 
 podman driver is currently experimental.
and only supported on Linux and MacOs (with a remote podman server) diff --git a/site/content/en/docs/Reference/disk_cache.md b/site/content/en/docs/Reference/disk_cache.md index 84d43112ef..24590727d5 100644 --- a/site/content/en/docs/Reference/disk_cache.md +++ b/site/content/en/docs/Reference/disk_cache.md @@ -4,7 +4,7 @@ linkTitle: "Disk cache" weight: 6 date: 2019-08-01 description: > - Cache Rules Everything Around Minikube + Cache Rules Everything Around minikube --- minikube has built-in support for caching downloaded resources into `$MINIKUBE_HOME/cache`. Here are the important file locations: diff --git a/site/content/en/docs/Reference/persistent_volumes.md b/site/content/en/docs/Reference/persistent_volumes.md index 6b7a38b83b..02e90bc703 100644 --- a/site/content/en/docs/Reference/persistent_volumes.md +++ b/site/content/en/docs/Reference/persistent_volumes.md @@ -7,7 +7,7 @@ description: > About persistent volumes (hostPath) --- -minikube supports [PersistentVolumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) of type `hostPath` out of the box. These PersistentVolumes are mapped to a directory inside the running Minikube instance (usually a VM, unless you use `--driver=none`, `--driver=docker`, or `--driver=podman`). For more information on how this works, read the Dynamic Provisioning section below. +minikube supports [PersistentVolumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) of type `hostPath` out of the box. These PersistentVolumes are mapped to a directory inside the running minikube instance (usually a VM, unless you use `--driver=none`, `--driver=docker`, or `--driver=podman`). For more information on how this works, read the Dynamic Provisioning section below. 
## A note on mounts, persistence, and minikube hosts diff --git a/site/content/en/docs/Tutorials/continuous_integration.md b/site/content/en/docs/Tutorials/continuous_integration.md index 4dad6cc9ed..1a55a58150 100644 --- a/site/content/en/docs/Tutorials/continuous_integration.md +++ b/site/content/en/docs/Tutorials/continuous_integration.md @@ -9,13 +9,13 @@ description: > ## Overview -Most continuous integration environments are already running inside a VM, and may not support nested virtualization. The `none` driver was designed for this use case. +Most continuous integration environments are already running inside a VM, and may not support nested virtualization. The `none` driver was designed for this use case. or you could alternatively use the [Docker](https://minikube.sigs.k8s.io/docs/reference/drivers/docker). ## Prerequisites - VM running a systemd based Linux distribution -## Tutorial +## using none driver Here is an example, that runs minikube from a non-root user, and ensures that the latest stable kubectl is installed: @@ -39,3 +39,7 @@ touch $KUBECONFIG sudo -E minikube start --driver=none ``` + +## Alternative ways + +you could alternatively use minikube's container drivers such as [Docker](https://minikube.sigs.k8s.io/docs/reference/drivers/docker) or [Podman](https://minikube.sigs.k8s.io/docs/reference/drivers/podman). \ No newline at end of file diff --git a/site/content/en/docs/Tutorials/nvidia_gpu.md b/site/content/en/docs/Tutorials/nvidia_gpu.md index 68846ae15b..4e9f561490 100644 --- a/site/content/en/docs/Tutorials/nvidia_gpu.md +++ b/site/content/en/docs/Tutorials/nvidia_gpu.md @@ -98,7 +98,7 @@ to expose GPUs with `--driver=kvm2`. Please don't mix these instructions. ## Why does minikube not support NVIDIA GPUs on macOS? 
-VM drivers supported by minikube for macOS doesn't support GPU passthrough:
+drivers supported by minikube for macOS don't support GPU passthrough:
 
 - [mist64/xhyve#108](https://github.com/mist64/xhyve/issues/108)
 - [moby/hyperkit#159](https://github.com/moby/hyperkit/issues/159)
 
diff --git a/site/content/en/docs/Tutorials/untrusted_root_certificate.md b/site/content/en/docs/Tutorials/untrusted_root_certificate.md
index 77093d76f3..d1b857aa73 100644
--- a/site/content/en/docs/Tutorials/untrusted_root_certificate.md
+++ b/site/content/en/docs/Tutorials/untrusted_root_certificate.md
@@ -11,7 +11,7 @@ description: >
 
 Most organizations deploy their own Root Certificate and CA service inside the corporate networks. Internal websites, image repositories and other resources may install SSL server certificates issued by this CA service for security and privacy concerns.
 
-You may install the Root Certificate into the minikube VM to access these corporate resources within the cluster.
+You may install the Root Certificate into the minikube cluster to access these corporate resources within the cluster.
## Prerequisites @@ -26,13 +26,13 @@ You may install the Root Certificate into the minikube VM to access these corpor openssl x509 -inform der -in my_company.cer -out my_company.pem ``` -* You may need to delete existing minikube VM +* You may need to delete existing minikube cluster ```shell minikube delete ``` -* Copy the certificate before creating the minikube VM +* Copy the certificate before creating the minikube cluster ```shell mkdir -p $HOME/.minikube/certs From 132d1459eb6de47e72a6f60bc95b443f9b2636d9 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 02:09:09 -0700 Subject: [PATCH 285/668] try local search for the site --- site/config.toml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/site/config.toml b/site/config.toml index 091e83fe83..23f8db88b6 100644 --- a/site/config.toml +++ b/site/config.toml @@ -112,7 +112,10 @@ github_project_repo = "" github_subdir = "site" # Google Custom Search Engine ID. Remove or comment out to disable search. 
-gcs_engine_id = "005331096405080631692:s7c4yfpw9sy" +# gcs_engine_id = "005331096405080631692:s7c4yfpw9sy" + +# enabling local search https://www.docsy.dev/docs/adding-content/navigation/#configure-local-search-with-lunr +offlineSearch = true # User interface configuration [params.ui] From 78c8d0245cb8c5a6dcea389f9bb3c88468e67736 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 02:45:01 -0700 Subject: [PATCH 286/668] update docsy theme submodule --- site/themes/docsy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/site/themes/docsy b/site/themes/docsy index 493bb1a0af..3123298f5b 160000 --- a/site/themes/docsy +++ b/site/themes/docsy @@ -1 +1 @@ -Subproject commit 493bb1a0af92d1242f8396aeb1661dcd3a010db7 +Subproject commit 3123298f5b0f56b3315b55319e17a8fa6c9d98f9 From 7e799c6be34eb434fc17e6de7fed4732b0d1275b Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 03:00:56 -0700 Subject: [PATCH 287/668] add comment to Makefile for updating docsy --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index f7b45a07ed..43bb6b9cf2 100755 --- a/Makefile +++ b/Makefile @@ -625,7 +625,7 @@ release-kvm-driver: install-kvm-driver checksum ## Release KVM Driver gsutil cp $(GOBIN)/docker-machine-driver-kvm2 gs://minikube/drivers/kvm/$(VERSION)/ gsutil cp $(GOBIN)/docker-machine-driver-kvm2.sha256 gs://minikube/drivers/kvm/$(VERSION)/ -site/themes/docsy/assets/vendor/bootstrap/package.js: +site/themes/docsy/assets/vendor/bootstrap/package.js: ## update the website docsy theme git submodule git submodule update -f --init --recursive out/hugo/hugo: From c73d4ca32b26facb62f7dee27630dc376ea86ced Mon Sep 17 00:00:00 2001 From: Yang Keao Date: Thu, 26 Mar 2020 18:51:25 +0800 Subject: [PATCH 288/668] add TBF and IPSET filter to kernel config Signed-off-by: Yang Keao --- .../iso/minikube-iso/board/coreos/minikube/linux_defconfig | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git 
a/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig b/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig index ebf694f191..a766017e78 100644 --- a/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig +++ b/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig @@ -1,4 +1,5 @@ # CONFIG_LOCALVERSION_AUTO is not set +CONFIG_KERNEL_LZ4=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_AUDIT=y @@ -25,10 +26,10 @@ CONFIG_CPUSETS=y CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y CONFIG_USER_NS=y CONFIG_BLK_DEV_INITRD=y CONFIG_BPF_SYSCALL=y -CONFIG_CGROUP_BPF=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_SMP=y @@ -270,12 +271,14 @@ CONFIG_BRIDGE_EBT_LOG=m CONFIG_BRIDGE_EBT_NFLOG=m CONFIG_BRIDGE=m CONFIG_NET_SCHED=y +CONFIG_NET_SCH_TBF=y CONFIG_NET_SCH_NETEM=y CONFIG_NET_SCH_INGRESS=m CONFIG_NET_CLS_U32=m CONFIG_NET_CLS_CGROUP=y CONFIG_NET_CLS_BPF=m CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_IPSET=y CONFIG_NET_CLS_ACT=y CONFIG_NET_ACT_MIRRED=m CONFIG_NET_ACT_BPF=m From cd3283d5c8d0777d91a01c6bcd2c9f4cbdc398c2 Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Thu, 26 Mar 2020 08:18:08 -0500 Subject: [PATCH 289/668] give some love to actions --- .github/workflows/main.yml | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index a36dfb1a19..8d6a7f8b82 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -211,15 +211,21 @@ jobs: echo $STAT | jq '.FailedTests' || true echo "-------------------------------------------------------" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi - docker_windows_NT: + docker_windows_shell: needs: [build_minikube] env: TIME_ELAPSED: time - JOB_NAME: "Docker_windows_NT" + JOB_NAME: "Docker_windows_shell" COMMIT_STATUS: "" runs-on: windows-latest steps: - uses: actions/checkout@v2 + - name: Docker Info + shell: bash + run: | + docker 
info || true + docker version || true + docker ps || true - name: Download gopogh run: | curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh.exe @@ -235,7 +241,7 @@ jobs: mkdir -p report mkdir -p testhome START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome minikube_binaries/e2e-windows-amd64.exe --expected-default-driver=hyperv -minikube-start-args=--vm-driver=hyperv -binary=minikube_binaries/minikube-windows-amd64.exe -test.v -test.timeout=65m 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome minikube_binaries/e2e-windows-amd64.exe --expected-default-driver=hyperv -minikube-start-args=--vm-driver=docker -binary=minikube_binaries/minikube-windows-amd64.exe -test.v -test.timeout=65m 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) @@ -256,7 +262,7 @@ jobs: shell: bash - uses: actions/upload-artifact@v1 with: - name: docker_windows_NT + name: docker_windows_shell path: report - name: The End Result run: | @@ -265,6 +271,8 @@ jobs: echo "----------------${numFail} Failures----------------------------" echo $STAT | jq '.FailedTests' || true echo "--------------------------------------------" + numPass=$(echo $STAT | jq '.NumberOfPass') + echo "*** $numPass Passed ***" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi shell: bash none_ubuntu16_04: From 1520b592768096fb620e20fce7ba8c97275a8e93 Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Thu, 26 Mar 2020 08:19:48 -0500 Subject: [PATCH 290/668] update stage name --- .github/workflows/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 8d6a7f8b82..31be5e710c 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -499,15 +499,15 @@ jobs: run: | mkdir -p all_reports cp -r docker_ubuntu_18_04 
./all_reports/ - - name: download results docker_windows_NT + - name: download results docker_windows_shell uses: actions/download-artifact@v1 with: - name: docker_windows_NT + name: docker_windows_shell - name: cp to all_report shell: bash run: | mkdir -p all_reports - cp -r docker_windows_NT ./all_reports/ + cp -r docker_windows_shell ./all_reports/ - name: Download Results none_ubuntu16_04 uses: actions/download-artifact@v1 with: From 3adfa8304c945773e91b07a249930743e3a29c60 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 26 Mar 2020 06:30:20 -0700 Subject: [PATCH 291/668] Turn log message back into comment --- pkg/minikube/machine/fix.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 05cbb8eec5..9a96df2afb 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -77,8 +77,8 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host. return h, err } + // Avoid reprovisioning "none" driver because provision.Detect requires SSH if !driver.BareMetal(h.Driver.DriverName()) { - glog.Infof("%s is local, skipping re-provision as it requires SSH", driverName) e := engineOptions(cc) h.HostOptions.EngineOptions.Env = e.Env err = provisionDockerMachine(h) From 42a28d014e5167b9ce453a08b4038ecdb4986517 Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Thu, 26 Mar 2020 08:32:59 -0500 Subject: [PATCH 292/668] Update main.yml --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 31be5e710c..c6bf2a4574 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -241,7 +241,7 @@ jobs: mkdir -p report mkdir -p testhome START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome minikube_binaries/e2e-windows-amd64.exe --expected-default-driver=hyperv -minikube-start-args=--vm-driver=docker 
-binary=minikube_binaries/minikube-windows-amd64.exe -test.v -test.timeout=65m 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome minikube_binaries/e2e-windows-amd64.exe --expected-default-driver=docker -minikube-start-args=--vm-driver=docker -binary=minikube_binaries/minikube-windows-amd64.exe -test.v -test.timeout=65m 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) From 4f1ae0cd4fe50e5ffb924ece8b1b1947966c3c8f Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Thu, 26 Mar 2020 08:47:00 -0500 Subject: [PATCH 293/668] Update main.yml --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index c6bf2a4574..4eb9b9706a 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -241,7 +241,7 @@ jobs: mkdir -p report mkdir -p testhome START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome minikube_binaries/e2e-windows-amd64.exe --expected-default-driver=docker -minikube-start-args=--vm-driver=docker -binary=minikube_binaries/minikube-windows-amd64.exe -test.v -test.timeout=65m 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome minikube_binaries/e2e-windows-amd64.exe --expected-default-driver=hyperv -minikube-start-args=--vm-driver=hyperv -binary=minikube_binaries/minikube-windows-amd64.exe -test.v -test.timeout=65m 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) From d2c71b5363bc02b37f1e89ddc6f78a35e0d40512 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 26 Mar 2020 08:11:39 -0700 Subject: [PATCH 294/668] Fix testing regression which broke stdout reads --- test/integration/helpers.go | 8 ++++---- test/integration/version_upgrade_test.go | 7 +++++-- 2 files 
changed, 9 insertions(+), 6 deletions(-) diff --git a/test/integration/helpers.go b/test/integration/helpers.go index 8ac021862a..89eef10dc2 100644 --- a/test/integration/helpers.go +++ b/test/integration/helpers.go @@ -64,8 +64,8 @@ func (rr RunResult) Command() string { } // indentLines indents every line in a bytes.Buffer and returns it as string -func indentLines(b *bytes.Buffer) string { - scanner := bufio.NewScanner(b) +func indentLines(b []byte) string { + scanner := bufio.NewScanner(bytes.NewReader(b)) var lines string for scanner.Scan() { lines = lines + "\t" + scanner.Text() + "\n" @@ -77,10 +77,10 @@ func indentLines(b *bytes.Buffer) string { func (rr RunResult) Output() string { var sb strings.Builder if rr.Stdout.Len() > 0 { - sb.WriteString(fmt.Sprintf("\n-- stdout --\n%s\n-- /stdout --", indentLines(rr.Stdout))) + sb.WriteString(fmt.Sprintf("\n-- stdout --\n%s\n-- /stdout --", indentLines(rr.Stdout.Bytes()))) } if rr.Stderr.Len() > 0 { - sb.WriteString(fmt.Sprintf("\n** stderr ** \n%s\n** /stderr **", indentLines(rr.Stderr))) + sb.WriteString(fmt.Sprintf("\n** stderr ** \n%s\n** /stderr **", indentLines(rr.Stderr.Bytes()))) } return sb.String() } diff --git a/test/integration/version_upgrade_test.go b/test/integration/version_upgrade_test.go index 2eb4eaabb6..fa6a4d4653 100644 --- a/test/integration/version_upgrade_test.go +++ b/test/integration/version_upgrade_test.go @@ -89,9 +89,10 @@ func TestVersionUpgrade(t *testing.T) { if err != nil { t.Logf("status error: %v (may be ok)", err) } + got := strings.TrimSpace(rr.Stdout.String()) if got != state.Stopped.String() { - t.Errorf("status = %q; want = %q", got, state.Stopped.String()) + t.Errorf("FAILED: status = %q; want = %q", got, state.Stopped.String()) } args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) 
@@ -119,14 +120,16 @@ func TestVersionUpgrade(t *testing.T) { t.Fatalf("expected server version %s is not the same with latest version %s", cv.ServerVersion.GitVersion, constants.NewestKubernetesVersion) } + t.Logf("Attempting to downgrade Kubernetes (should fail)") args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) if rr, err := Run(t, exec.CommandContext(ctx, tf.Name(), args...)); err == nil { t.Fatalf("downgrading kubernetes should not be allowed. expected to see error but got %v for %q", err, rr.Command()) } + t.Logf("Attempting restart after unsuccessful downgrade") args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) rr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("start and already started minikube failed. args: %q : %v", rr.Command(), err) + t.Errorf("start after failed upgrade: %v", err) } } From daffae3793b51921b8c74958a48c6446d1e7856b Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 26 Mar 2020 09:07:14 -0700 Subject: [PATCH 295/668] Wait for control-plane to upgrade before proceeding --- .../bootstrapper/bsutil/kverify/kverify.go | 35 ++++++++++- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 63 ++++++++++++++++--- 2 files changed, 86 insertions(+), 12 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go index 39cd1ea169..04156f2e13 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go @@ -30,9 +30,11 @@ import ( "github.com/docker/machine/libmachine/state" "github.com/golang/glog" + "github.com/pkg/errors" core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + 
"k8s.io/apimachinery/pkg/version" "k8s.io/client-go/kubernetes" kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/minikube/pkg/minikube/bootstrapper" @@ -61,6 +63,7 @@ func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, c if _, ierr := apiServerPID(cr); ierr != nil { return false, nil } + return true, nil }) if err != nil { @@ -180,7 +183,7 @@ func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg con } // WaitForHealthyAPIServer waits for api server status to be running -func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, start time.Time, ip string, port int, timeout time.Duration) error { +func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, client *kubernetes.Clientset, start time.Time, ip string, port int, timeout time.Duration) error { glog.Infof("waiting for apiserver healthz status ...") hStart := time.Now() @@ -208,7 +211,35 @@ func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, c if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, healthz); err != nil { return fmt.Errorf("apiserver healthz never reported healthy") } - glog.Infof("duration metric: took %s to wait for apiserver healthz status ...", time.Since(hStart)) + + vcheck := func() (bool, error) { + if time.Since(start) > timeout { + return false, fmt.Errorf("cluster wait timed out during version check") + } + if err := APIServerVersionMatch(client, cfg.KubernetesConfig.KubernetesVersion); err != nil { + glog.Warningf("api server version match failed: %v", err) + return false, nil + } + return true, nil + } + + if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, vcheck); err != nil { + return fmt.Errorf("controlPlane never updated to %s", cfg.KubernetesConfig.KubernetesVersion) + } + + 
glog.Infof("duration metric: took %s to wait for apiserver health ...", time.Since(hStart)) + return nil +} + +func APIServerVersionMatch(client *kubernetes.Clientset, expected string) error { + vi, err := client.ServerVersion() + if err != nil { + return errors.Wrap(err, "server version") + } + glog.Infof("control plane version: %s", vi) + if version.CompareKubeAwareVersionStrings(vi.String(), expected) != 0 { + return fmt.Errorf("controlPane = %q, expected: %q", vi.String(), expected) + } return nil } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 4ccc402698..ea2c15f6f2 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -56,6 +56,7 @@ import ( "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/vmpath" "k8s.io/minikube/pkg/util" + "k8s.io/minikube/pkg/util/retry" "k8s.io/minikube/pkg/version" ) @@ -251,6 +252,27 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error { return nil } +// unpause unpauses any Kubernetes backplane components +func (k *Bootstrapper) unpause(cfg config.ClusterConfig) error { + + cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c}) + if err != nil { + return err + } + + ids, err := cr.ListContainers(cruntime.ListOptions{State: cruntime.Paused, Namespaces: []string{"kube-system"}}) + if err != nil { + return errors.Wrap(err, "list paused") + } + + if len(ids) > 0 { + if err := cr.UnpauseContainers(ids); err != nil { + return err + } + } + return nil +} + // StartCluster starts the cluster func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { start := time.Now() @@ -259,6 +281,11 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { glog.Infof("StartCluster complete in %s", time.Since(start)) }() + // Before we start, ensure that no paused components are lurking around + if err := k.unpause(cfg); err != nil { + 
glog.Warningf("unpause failed: %v", err) + } + if err := bsutil.ExistingConfig(k.c); err == nil { glog.Infof("found existing configuration files, will attempt cluster restart") rerr := k.restartCluster(cfg) @@ -349,23 +376,23 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time return err } - if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, start, ip, port, timeout); err != nil { - return err - } - - c, err := k.client(ip, port) + client, err := k.client(ip, port) if err != nil { return errors.Wrap(err, "get k8s client") } - if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, c, start, timeout); err != nil { + if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, client, start, ip, port, timeout); err != nil { + return err + } + + if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, client, start, timeout); err != nil { return errors.Wrap(err, "waiting for system pods") } return nil } // needsReset returns whether or not the cluster needs to be reconfigured -func (k *Bootstrapper) needsReset(conf string, ip string, port int, client *kubernetes.Clientset) bool { +func (k *Bootstrapper) needsReset(conf string, ip string, port int, client *kubernetes.Clientset, version string) bool { if rr, err := k.c.RunCmd(exec.Command("sudo", "diff", "-u", conf, conf+".new")); err != nil { glog.Infof("needs reset: configs differ:\n%s", rr.Output()) return true @@ -386,6 +413,12 @@ func (k *Bootstrapper) needsReset(conf string, ip string, port int, client *kube glog.Infof("needs reset: %v", err) return true } + + if err := kverify.APIServerVersionMatch(client, version); err != nil { + glog.Infof("needs reset: %v", err) + return true + } + return false } @@ -426,7 +459,7 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { // If the cluster is running, check if we have any work to do. 
conf := bsutil.KubeadmYamlPath - if !k.needsReset(conf, ip, port, client) { + if !k.needsReset(conf, ip, port, client, cfg.KubernetesConfig.KubernetesVersion) { glog.Infof("Taking a shortcut, as the cluster seems to be properly configured") return nil } @@ -466,12 +499,22 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { return errors.Wrap(err, "apiserver healthz") } + if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, client, time.Now(), ip, port, kconst.DefaultControlPlaneTimeout); err != nil { + return errors.Wrap(err, "apiserver health") + } + if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { return errors.Wrap(err, "system pods") } - if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, conf))); err != nil { - return errors.Wrapf(err, fmt.Sprintf("addon phase cmd:%q", rr.Command())) + // This can fail during upgrades if the old pods have not shut down yet + addonPhase := func() error { + _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, conf))) + return err + } + if err = retry.Expo(addonPhase, 1*time.Second, 30*time.Second); err != nil { + glog.Warningf("addon install failed, wil retry: %v", err) + return errors.Wrap(err, "addons") } if err := bsutil.AdjustResourceLimits(k.c); err != nil { From 974d45dfd31722df88f3e3a2ced80f0a4c1f76fd Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Thu, 26 Mar 2020 10:01:55 -0700 Subject: [PATCH 296/668] make http error test fatal to avoid nil pointer --- test/integration/functional_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index a5ee68aa73..6623f1f362 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -335,7 +335,7 @@ func validateDashboardCmd(ctx 
context.Context, t *testing.T, profile string) { resp, err := retryablehttp.Get(u.String()) if err != nil { - t.Errorf("failed to http get %q : %v", u.String(), err) + t.Fatalf("failed to http get %q : %v", u.String(), err) } if resp.StatusCode != http.StatusOK { body, err := ioutil.ReadAll(resp.Body) From efbe113a94fb37d880aaee006ee729c7b6ac4896 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Thu, 26 Mar 2020 10:13:26 -0700 Subject: [PATCH 297/668] Add more logging to preload --- hack/preload-images/upload.go | 4 ++-- pkg/minikube/download/preload.go | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/hack/preload-images/upload.go b/hack/preload-images/upload.go index a2181294e6..b903f3cc11 100644 --- a/hack/preload-images/upload.go +++ b/hack/preload-images/upload.go @@ -30,13 +30,13 @@ func uploadTarball(tarballFilename string) error { hostPath := path.Join("out/", tarballFilename) gcsDest := fmt.Sprintf("gs://%s", download.PreloadBucket) cmd := exec.Command("gsutil", "cp", hostPath, gcsDest) - if output, err := cmd.Output(); err != nil { + if output, err := cmd.CombinedOutput(); err != nil { return errors.Wrapf(err, "uploading %s to GCS bucket: %v\n%s", hostPath, err, string(output)) } // Make tarball public to all users gcsPath := fmt.Sprintf("%s/%s", gcsDest, tarballFilename) cmd = exec.Command("gsutil", "acl", "ch", "-u", "AllUsers:R", gcsPath) - if output, err := cmd.Output(); err != nil { + if output, err := cmd.CombinedOutput(); err != nil { return errors.Wrapf(err, "uploading %s to GCS bucket: %v\n%s", hostPath, err, string(output)) } return nil diff --git a/pkg/minikube/download/preload.go b/pkg/minikube/download/preload.go index 44b2f81749..dc834fb7f0 100644 --- a/pkg/minikube/download/preload.go +++ b/pkg/minikube/download/preload.go @@ -77,6 +77,7 @@ func remoteTarballURL(k8sVersion string) string { // PreloadExists returns true if there is a preloaded tarball that can be used func PreloadExists(k8sVersion, containerRuntime string) 
bool { + glog.Infof("Checking if preload exists for k8s version %s and runtime %s", k8sVersion, containerRuntime) if !viper.GetBool("preload") { return false } @@ -85,6 +86,7 @@ func PreloadExists(k8sVersion, containerRuntime string) bool { // and https://github.com/kubernetes/minikube/issues/6934 // to track status of adding containerd & crio if containerRuntime != "docker" { + glog.Info("Container runtime isn't docker, skipping preload") return false } From 27685523611e514429d2c26f1c5d93871e1a700b Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Thu, 26 Mar 2020 10:23:30 -0700 Subject: [PATCH 298/668] Use download.TarballExists to make sure tarball is publicly accessible in script generator --- hack/preload-images/preload_images.go | 9 +-------- hack/preload-images/upload.go | 7 +++++++ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/hack/preload-images/preload_images.go b/hack/preload-images/preload_images.go index 60b6bc9e2a..948918d8f9 100644 --- a/hack/preload-images/preload_images.go +++ b/hack/preload-images/preload_images.go @@ -62,7 +62,7 @@ func main() { for _, kv := range k8sVersions { for _, cr := range containerRuntimes { tf := download.TarballName(kv) - if tarballExists(tf) { + if download.PreloadExists(kv, cr) { fmt.Printf("A preloaded tarball for k8s version %s already exists, skipping generation.\n", kv) continue } @@ -77,13 +77,6 @@ func main() { } } -func tarballExists(tarballFilename string) bool { - fmt.Println("Checking if tarball already exists...") - gcsPath := fmt.Sprintf("gs://%s/%s", download.PreloadBucket, tarballFilename) - cmd := exec.Command("gsutil", "stat", gcsPath) - return cmd.Run() == nil -} - func verifyDockerStorage() error { cmd := exec.Command("docker", "info", "-f", "{{.Info.Driver}}") var stderr bytes.Buffer diff --git a/hack/preload-images/upload.go b/hack/preload-images/upload.go index b903f3cc11..f6235a38f4 100644 --- a/hack/preload-images/upload.go +++ b/hack/preload-images/upload.go @@ -37,6 +37,13 @@ 
func uploadTarball(tarballFilename string) error { gcsPath := fmt.Sprintf("%s/%s", gcsDest, tarballFilename) cmd = exec.Command("gsutil", "acl", "ch", "-u", "AllUsers:R", gcsPath) if output, err := cmd.CombinedOutput(); err != nil { + fmt.Printf(`Failed to update ACLs on this tarball in GCS. Please run + +gsutil acl ch -u AllUsers:R %s + +manually to make this link public, or rerun this script to rebuild and reupload the tarball. + + `, gcsPath) return errors.Wrapf(err, "uploading %s to GCS bucket: %v\n%s", hostPath, err, string(output)) } return nil From 2a1ee510837ae5d870275dfa89c8acc4ded28ac2 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Thu, 26 Mar 2020 10:31:46 -0700 Subject: [PATCH 299/668] add logging --- hack/preload-images/upload.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hack/preload-images/upload.go b/hack/preload-images/upload.go index f6235a38f4..1a6c02af75 100644 --- a/hack/preload-images/upload.go +++ b/hack/preload-images/upload.go @@ -30,12 +30,14 @@ func uploadTarball(tarballFilename string) error { hostPath := path.Join("out/", tarballFilename) gcsDest := fmt.Sprintf("gs://%s", download.PreloadBucket) cmd := exec.Command("gsutil", "cp", hostPath, gcsDest) + fmt.Printf("Running: %v", cmd.Args) if output, err := cmd.CombinedOutput(); err != nil { return errors.Wrapf(err, "uploading %s to GCS bucket: %v\n%s", hostPath, err, string(output)) } // Make tarball public to all users gcsPath := fmt.Sprintf("%s/%s", gcsDest, tarballFilename) cmd = exec.Command("gsutil", "acl", "ch", "-u", "AllUsers:R", gcsPath) + fmt.Printf("Running: %v", cmd.Args) if output, err := cmd.CombinedOutput(); err != nil { fmt.Printf(`Failed to update ACLs on this tarball in GCS. 
Please run From c9c324242ce6f768cd5ac924c628ec68ca634dd8 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Thu, 26 Mar 2020 10:37:29 -0700 Subject: [PATCH 300/668] add newlines --- hack/preload-images/upload.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/preload-images/upload.go b/hack/preload-images/upload.go index 1a6c02af75..89bbd8004b 100644 --- a/hack/preload-images/upload.go +++ b/hack/preload-images/upload.go @@ -30,14 +30,14 @@ func uploadTarball(tarballFilename string) error { hostPath := path.Join("out/", tarballFilename) gcsDest := fmt.Sprintf("gs://%s", download.PreloadBucket) cmd := exec.Command("gsutil", "cp", hostPath, gcsDest) - fmt.Printf("Running: %v", cmd.Args) + fmt.Printf("Running: %v\n", cmd.Args) if output, err := cmd.CombinedOutput(); err != nil { return errors.Wrapf(err, "uploading %s to GCS bucket: %v\n%s", hostPath, err, string(output)) } // Make tarball public to all users gcsPath := fmt.Sprintf("%s/%s", gcsDest, tarballFilename) cmd = exec.Command("gsutil", "acl", "ch", "-u", "AllUsers:R", gcsPath) - fmt.Printf("Running: %v", cmd.Args) + fmt.Printf("Running: %v\n", cmd.Args) if output, err := cmd.CombinedOutput(); err != nil { fmt.Printf(`Failed to update ACLs on this tarball in GCS. 
Please run From e293b384311b7653922031b5951364150e583033 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Thu, 26 Mar 2020 10:42:45 -0700 Subject: [PATCH 301/668] Set --preload flag to true so that download.PreloadExists doesn't immeditely return false --- hack/preload-images/preload_images.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hack/preload-images/preload_images.go b/hack/preload-images/preload_images.go index 948918d8f9..928cd44821 100644 --- a/hack/preload-images/preload_images.go +++ b/hack/preload-images/preload_images.go @@ -23,6 +23,7 @@ import ( "os/exec" "strings" + "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/exit" ) @@ -45,6 +46,7 @@ func init() { if k8sVersion != "" { k8sVersions = append(k8sVersions, k8sVersion) } + viper.Set("preload", "true") } func main() { From bbe64aa712ca01f55020ac81ae02cfcb4bb7b4da Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Thu, 26 Mar 2020 10:55:24 -0700 Subject: [PATCH 302/668] Upgrade to v1.9.0 and add release notes --- CHANGELOG.md | 35 +++++++++++++++++++++++++++++++++++ Makefile | 2 +- 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fe559157a9..376ef4e7f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,40 @@ # Release Notes +## Version 1.9.0- 2020-03-26 + +New features & improvements + +* Update DefaultKubernetesVersion to v1.18.0 [#7235](https://github.com/kubernetes/minikube/pull/7235) +* Add --vm flag for users who want to autoselect only VM's [#7068](https://github.com/kubernetes/minikube/pull/7068) +* Add 'stable' and 'latest' as valid kubernetes-version values [#7212](https://github.com/kubernetes/minikube/pull/7212) + +* gpu addon: privileged mode no longer required [#7149](https://github.com/kubernetes/minikube/pull/7149) +* Add sch_tbf and extend filter ipset kernel module for bandwidth shaping [#7255](https://github.com/kubernetes/minikube/pull/7255) +* Parse --disk-size and --memory 
sizes with binary suffixes [#7206](https://github.com/kubernetes/minikube/pull/7206) +* local search for the site [#7253](https://github.com/kubernetes/minikube/pull/7253) + + +Bug Fixes + +* Re-initalize failed Kubernetes clusters [#7234](https://github.com/kubernetes/minikube/pull/7234) +* do not override hostname if extraConfig is specified [#7238](https://github.com/kubernetes/minikube/pull/7238) +* Enable HW_RANDOM_VIRTIO to fix sshd startup delays [#7208](https://github.com/kubernetes/minikube/pull/7208) +* hyperv Delete: call StopHost before removing VM [#7160](https://github.com/kubernetes/minikube/pull/7160) + +Huge thank you for this release towards our contributors: + +- Anders F Björklund +- Medya Ghazizadeh +- Priya Wadhwa +- Sharif Elgamal +- Thomas Strömberg +- Tom +- Vincent Link +- Yang Keao +- Zhongcheng Lao +- vikkyomkar + + ## Version 1.9.0-beta.2 - 2020-03-21 New features & improvements diff --git a/Makefile b/Makefile index 43bb6b9cf2..049427610c 100755 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ # Bump these on release - and please check ISO_VERSION for correctness. 
VERSION_MAJOR ?= 1 VERSION_MINOR ?= 9 -VERSION_BUILD ?= 0-beta.2 +VERSION_BUILD ?= 0 RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD) VERSION ?= v$(RAW_VERSION) From 46f31480dfd964a7fbc3b41b2e806fae7936c72b Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 26 Mar 2020 14:03:22 -0700 Subject: [PATCH 303/668] Make slow command detection consistent across kic/oci --- go.sum | 24 ++++++++++++ pkg/drivers/kic/kic.go | 17 ++------- pkg/drivers/kic/oci/oci.go | 68 ++++++++++++++++++---------------- pkg/drivers/kic/oci/volumes.go | 29 ++------------- 4 files changed, 68 insertions(+), 70 deletions(-) diff --git a/go.sum b/go.sum index 15a7b48dae..c997562aea 100644 --- a/go.sum +++ b/go.sum @@ -49,8 +49,10 @@ github.com/Parallels/docker-machine-parallels v1.3.0 h1:RG1fyf3v1GwXMCeHRiZkB4tL github.com/Parallels/docker-machine-parallels v1.3.0/go.mod h1:HCOMm3Hulq/xuEVQMyZOuQlA+dSZpFY5kdCTZWjMVis= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= @@ -107,6 +109,7 @@ github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEe github.com/cenkalti/backoff v2.2.1+incompatible/go.mod 
h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c/go.mod h1:Xe6ZsFhtM8HrDku0pxJ3/Lr51rwykrzgFwpmTzleatY= +github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8= github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b h1:T4nWG1TXIxeor8mAu5bFguPJgSIGhZqv/f0z55KCrJM= github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho= @@ -156,6 +159,7 @@ github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd h1:uVsMphB1eRx7xB1njzL3fuMdWRN8HtVzoUOItHMwv5c= github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= @@ -197,7 +201,9 @@ github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6 github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= 
+github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= @@ -206,6 +212,7 @@ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= @@ -233,11 +240,13 @@ github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+ github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod 
h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -251,6 +260,7 @@ github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nA github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= @@ -260,6 +270,7 @@ github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dp github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= 
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= @@ -329,6 +340,7 @@ github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAO github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/cadvisor v0.35.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -372,6 +384,7 @@ github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= @@ -423,6 +436,7 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht 
github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f h1:tL0xH80QVHQOde6Qqdohv6PewABH8l8N9pywZtuojJ0= github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU= +github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= @@ -476,7 +490,9 @@ github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LE github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= github.com/libvirt/libvirt-go v3.4.0+incompatible h1:Cpyalgj1x8JIeTlL6SDYZBo7j8nY3+5XHqmi8DaunCk= github.com/libvirt/libvirt-go v3.4.0+incompatible/go.mod h1:34zsnB4iGeOv7Byj6qotuW8Ya4v4Tr43ttjz/F0wjLE= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= @@ -498,6 +514,7 @@ github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson 
v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -559,6 +576,7 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= @@ -604,6 +622,7 @@ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport 
v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= @@ -1023,11 +1042,13 @@ honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= k8s.io/api v0.17.3 h1:XAm3PZp3wnEdzekNkcmj/9Y1zdmQYJ1I4GKSBBZ8aG0= k8s.io/api v0.17.3/go.mod h1:YZ0OTkuw7ipbe305fMpIdf3GLXZKRigjtZaV5gzC2J0= +k8s.io/apiextensions-apiserver v0.17.3 h1:WDZWkPcbgvchEdDd7ysL21GGPx3UKZQLDZXEkevT6n4= k8s.io/apiextensions-apiserver v0.17.3/go.mod h1:CJbCyMfkKftAd/X/V6OTHYhVn7zXnDdnkUjS1h0GTeY= k8s.io/apimachinery v0.17.3 h1:f+uZV6rm4/tHE7xXgLyToprg6xWairaClGVkm2t8omg= k8s.io/apimachinery v0.17.3/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= k8s.io/apiserver v0.17.3 h1:faZbSuFtJ4dx09vctKZGHms/7bp3qFtbqb10Swswqfs= k8s.io/apiserver v0.17.3/go.mod h1:iJtsPpu1ZpEnHaNawpSV0nYTGBhhX2dUlnn7/QS7QiY= +k8s.io/cli-runtime v0.17.3 h1:0ZlDdJgJBKsu77trRUynNiWsRuAvAVPBNaQfnt/1qtc= k8s.io/cli-runtime v0.17.3/go.mod h1:X7idckYphH4SZflgNpOOViSxetiMj6xI0viMAjM81TA= k8s.io/client-go v0.17.3 h1:deUna1Ksx05XeESH6XGCyONNFfiQmDdqeqUvicvP6nU= k8s.io/client-go v0.17.3/go.mod h1:cLXlTMtWHkuK4tD360KpWz2gG2KtdWEr/OT02i3emRQ= @@ -1062,6 +1083,7 @@ k8s.io/kubelet v0.17.3/go.mod h1:Nh8owUHZcUXtnDAtmGnip36Nw+X6c4rbmDQlVyIhwMQ= k8s.io/kubernetes v1.17.3 h1:zWCppkLfHM+hoLqfbsrQ0cJnYw+4vAvedI92oQnjo/Q= k8s.io/kubernetes v1.17.3/go.mod h1:gt28rfzaskIzJ8d82TSJmGrJ0XZD0BBy8TcQvTuCI3w= k8s.io/legacy-cloud-providers v0.17.3/go.mod h1:ujZML5v8efVQxiXXTG+nck7SjP8KhMRjUYNIsoSkYI0= +k8s.io/metrics v0.17.3 h1:IqXkNK+5E3vnobFD923Mn1QJEt3fb6+sK0wIjtBzOvw= k8s.io/metrics v0.17.3/go.mod h1:HEJGy1fhHOjHggW9rMDBJBD3YuGroH3Y1pnIRw9FFaI= k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8= k8s.io/sample-apiserver v0.17.3/go.mod 
h1:cn/rvFIttGNqy1v88B5ZlDAbyyqDOoF7JHSwPiqNCNQ= @@ -1079,6 +1101,7 @@ mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIa mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/sig-storage-lib-external-provisioner v4.0.0+incompatible h1:qV3eFdgCp7Cp/ORjkJI9VBBEOntT+z385jLqdBtmgHA= sigs.k8s.io/sig-storage-lib-external-provisioner v4.0.0+incompatible/go.mod h1:qhqLyNwJC49PoUalmtzYb4s9fT8HOMBTLbTY1QoVOqI= @@ -1087,4 +1110,5 @@ sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1 sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= +vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc h1:MksmcCZQWAQJCTA5T0jgI/0sJ51AVm4Z41MrmfczEoc= vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= diff --git a/pkg/drivers/kic/kic.go b/pkg/drivers/kic/kic.go index 75f80e5c7d..692bcdf485 100644 --- a/pkg/drivers/kic/kic.go +++ b/pkg/drivers/kic/kic.go @@ -17,7 +17,6 @@ limitations under the License. package kic import ( - "context" "fmt" "net" "os/exec" @@ -220,20 +219,12 @@ func (d *Driver) GetURL() (string, error) { // GetState returns the state that the host is in (running, stopped, etc) func (d *Driver) GetState() (state.State, error) { - // allow no more than 2 seconds for this. 
when this takes long this means deadline passed - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - - cmd := exec.CommandContext(ctx, d.NodeConfig.OCIBinary, "inspect", "-f", "{{.State.Status}}", d.MachineName) - out, err := cmd.CombinedOutput() - if ctx.Err() == context.DeadlineExceeded { - glog.Errorf("GetState for %s took longer than normal. Restarting your %s daemon might fix this issue.", d.MachineName, d.OCIBinary) - return state.Error, fmt.Errorf("inspect %s timeout", d.MachineName) - } - o := strings.TrimSpace(string(out)) + out, err := oci.WarnIfSlow(d.NodeConfig.OCIBinary, "inspect", "-f", "{{.State.Status}}", d.MachineName) if err != nil { - return state.Error, errors.Wrapf(err, "%s: %s", strings.Join(cmd.Args, " "), o) + return state.Error, err } + + o := strings.TrimSpace(string(out)) switch o { case "running": return state.Running, nil diff --git a/pkg/drivers/kic/oci/oci.go b/pkg/drivers/kic/oci/oci.go index 5885b0e1cf..bfa1d0233c 100644 --- a/pkg/drivers/kic/oci/oci.go +++ b/pkg/drivers/kic/oci/oci.go @@ -29,6 +29,7 @@ import ( "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/localpath" + "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/util/retry" "fmt" @@ -232,19 +233,39 @@ func ContainerID(ociBinary string, nameOrID string) (string, error) { return string(out), err } -// ContainerExists checks if container name exists (either running or exited) -func ContainerExists(ociBin string, name string) (bool, error) { - // allow no more than 3 seconds for this. 
- ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) +// WarnIfSlow runs an oci command, warning about performance issues +func WarnIfSlow(arg ...string) ([]byte, error) { + killTime := 3 * time.Millisecond + warnTime := 1 * time.Millisecond + + ctx, cancel := context.WithTimeout(context.Background(), killTime) defer cancel() - cmd := exec.CommandContext(ctx, ociBin, "ps", "-a", "--format", "{{.Names}}") - out, err := cmd.CombinedOutput() - - if ctx.Err() == context.DeadlineExceeded { - return false, fmt.Errorf("time out running %s ps -a", ociBin) + start := time.Now() + glog.Infof("executing with %s timeout: %v", arg, killTime) + cmd := exec.CommandContext(ctx, arg[0], arg[1:]...) + stdout, err := cmd.Output() + d := time.Since(start) + if d > warnTime { + out.WarningT(`Executing "{{.command}}" took an unusually long time: {{.duration}}`, out.V{"command": strings.Join(cmd.Args, " "), "duration": d}) + out.T(out.Tip, `Restarting the {{.name}} service may improve performance.`, out.V{"name": arg[0]}) } + if ctx.Err() == context.DeadlineExceeded { + return stdout, fmt.Errorf("%q timed out after %s", strings.Join(cmd.Args, " "), killTime) + } + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return stdout, fmt.Errorf("%q failed: %v: %s", strings.Join(cmd.Args, " "), exitErr, exitErr.Stderr) + } + return stdout, fmt.Errorf("%q failed: %v", strings.Join(cmd.Args, " "), err) + } + return stdout, nil +} + +// ContainerExists checks if container name exists (either running or exited) +func ContainerExists(ociBin string, name string) (bool, error) { + out, err := WarnIfSlow(ociBin, "ps", "-a", "--format", "{{.Names}}") if err != nil { return false, errors.Wrapf(err, string(out)) } @@ -410,12 +431,10 @@ func withPortMappings(portMappings []PortMapping) createOpt { // listContainersByLabel returns all the container names with a specified label func listContainersByLabel(ociBinary string, label string) ([]string, error) { - - // allow no 
more than 5 seconds for docker ps - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - cmd := exec.CommandContext(ctx, ociBinary, "ps", "-a", "--filter", fmt.Sprintf("label=%s", label), "--format", "{{.Names}}") - stdout, err := cmd.Output() + stdout, err := WarnIfSlow(ociBinary, "ps", "-a", "--filter", fmt.Sprintf("label=%s", label), "--format", "{{.Names}}") + if err != nil { + return nil, err + } s := bufio.NewScanner(bytes.NewReader(stdout)) var names []string for s.Scan() { @@ -448,21 +467,6 @@ func PointToHostDockerDaemon() error { // ContainerStatus returns status of a container running,exited,... func ContainerStatus(ociBin string, name string) (string, error) { - // allow no more than 2 seconds for this. when this takes long this means deadline passed - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - - cmd := exec.CommandContext(ctx, ociBin, "inspect", name, "--format={{.State.Status}}") - out, err := cmd.CombinedOutput() - - if ctx.Err() == context.DeadlineExceeded { - glog.Warningf("%s inspect %s took longer than normal. 
Restarting your %s daemon might fix this issue.", ociBin, name, ociBin) - return strings.TrimSpace(string(out)), fmt.Errorf("inspect %s timeout", name) - } - - if err != nil { - return string(out), errors.Wrapf(err, "inspecting container: output %s", out) - } - - return strings.TrimSpace(string(out)), nil + out, err := WarnIfSlow(ociBin, "inspect", name, "--format={{.State.Status}}") + return strings.TrimSpace(string(out)), err } diff --git a/pkg/drivers/kic/oci/volumes.go b/pkg/drivers/kic/oci/volumes.go index 58c13b9621..2fbd9f32b0 100644 --- a/pkg/drivers/kic/oci/volumes.go +++ b/pkg/drivers/kic/oci/volumes.go @@ -19,11 +19,9 @@ package oci import ( "bufio" "bytes" - "context" "fmt" "os/exec" "strings" - "time" "github.com/golang/glog" "github.com/pkg/errors" @@ -42,16 +40,8 @@ func DeleteAllVolumesByLabel(ociBin string, label string) []error { } for _, v := range vs { - // allow no more than 3 seconds for this. when this takes long this means deadline passed - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - defer cancel() - cmd := exec.CommandContext(ctx, ociBin, "volume", "rm", "--force", v) - if ctx.Err() == context.DeadlineExceeded { - glog.Warningf("removing volume with label %s took longer than normal. Restarting your %s daemon might fix this issue.", label, ociBin) - deleteErrs = append(deleteErrs, fmt.Errorf("delete deadline exceeded for %s", label)) - } - if out, err := cmd.CombinedOutput(); err != nil { - deleteErrs = append(deleteErrs, fmt.Errorf("deleting volume %s: output: %s", v, string(out))) + if _, err := WarnIfSlow(ociBin, "volume", "rm", "--force", v); err != nil { + deleteErrs = append(deleteErrs, fmt.Errorf("deleting %q", v)) } } @@ -65,19 +55,8 @@ func PruneAllVolumesByLabel(ociBin string, label string) []error { var deleteErrs []error glog.Infof("trying to prune all %s volumes with label %s", ociBin, label) - // allow no more than 3 seconds for this. 
when this takes long this means deadline passed - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - defer cancel() - - // try to prune afterwards just in case delete didn't go through - cmd := exec.CommandContext(ctx, ociBin, "volume", "prune", "-f", "--filter", "label="+label) - if out, err := cmd.CombinedOutput(); err != nil { - deleteErrs = append(deleteErrs, errors.Wrapf(err, "prune volume by label %s: %s", label, string(out))) - } - - if ctx.Err() == context.DeadlineExceeded { - glog.Warningf("pruning volume with label %s took longer than normal. Restarting your %s daemon might fix this issue.", label, ociBin) - deleteErrs = append(deleteErrs, fmt.Errorf("prune deadline exceeded for %s", label)) + if _, err := WarnIfSlow(ociBin, "volume", "prune", "-f", "--filter", "label="+label); err != nil { + deleteErrs = append(deleteErrs, errors.Wrapf(err, "prune volume by label %s", label)) } return deleteErrs From 2f2dccd30c733676252838941ac9108fb89a92ae Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 26 Mar 2020 14:05:02 -0700 Subject: [PATCH 304/668] Remove test values --- pkg/drivers/kic/oci/oci.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/drivers/kic/oci/oci.go b/pkg/drivers/kic/oci/oci.go index bfa1d0233c..af6cd19c50 100644 --- a/pkg/drivers/kic/oci/oci.go +++ b/pkg/drivers/kic/oci/oci.go @@ -235,8 +235,8 @@ func ContainerID(ociBinary string, nameOrID string) (string, error) { // WarnIfSlow runs an oci command, warning about performance issues func WarnIfSlow(arg ...string) ([]byte, error) { - killTime := 3 * time.Millisecond - warnTime := 1 * time.Millisecond + killTime := 15 * time.Second + warnTime := 2 * time.Second ctx, cancel := context.WithTimeout(context.Background(), killTime) defer cancel() From 11ba7c6eb3634010b07ca97dcaabc4b203550844 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 26 Mar 2020 14:27:51 -0700 Subject: [PATCH 305/668] Set minLogCheckTime to 60 until we can cut 
down on spam --- pkg/minikube/bootstrapper/bsutil/kverify/kverify.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go index 04156f2e13..6f47fc3204 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go @@ -45,7 +45,7 @@ import ( ) // minLogCheckTime how long to wait before spamming error logs to console -const minLogCheckTime = 30 * time.Second +const minLogCheckTime = 60 * time.Second // WaitForAPIServerProcess waits for api server to be healthy returns error if it doesn't func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, start time.Time, timeout time.Duration) error { From c6bca4d1051e061d4c0aa474663290489666ee17 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 26 Mar 2020 14:53:05 -0700 Subject: [PATCH 306/668] Add more debugging for certificate issues --- pkg/minikube/bootstrapper/certs.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index de938082cc..cbd9d27b86 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -117,6 +117,7 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) } for _, f := range copyableFiles { + glog.Infof("copying: %s/%s", f.GetTargetDir(), f.GetTargetName()) if err := cmd.Copy(f); err != nil { return nil, errors.Wrapf(err, "Copy %s", f.GetAssetName()) } @@ -325,6 +326,7 @@ func collectCACerts() (map[string]string, error) { if info != nil && !info.IsDir() { ext := strings.ToLower(filepath.Ext(hostpath)) if ext == ".crt" || ext == ".pem" { + glog.Infof("found cert: %s (%d bytes)", info.Name(), info.Size()) validPem, err := isValidPEMCertificate(hostpath) if err != nil { return err @@ -360,9 +362,16 
@@ func collectCACerts() (map[string]string, error) { // getSubjectHash calculates Certificate Subject Hash for creating certificate symlinks func getSubjectHash(cr command.Runner, filePath string) (string, error) { + lrr, err := cr.RunCmd(exec.Command("ls", "-la", filePath)) + if err != nil { + return "", err + } + glog.Infof("hashing: %s", lrr.Stdout.String()) + rr, err := cr.RunCmd(exec.Command("openssl", "x509", "-hash", "-noout", "-in", filePath)) if err != nil { - return "", errors.Wrapf(err, rr.Command()) + crr, _ := cr.RunCmd(exec.Command("cat", filePath)) + return "", errors.Wrapf(err, "cert:\n%s\n---\n%s", lrr.Output(), crr.Stdout.String()) } stringHash := strings.TrimSpace(rr.Stdout.String()) return stringHash, nil From eb0e6f7b61618c4c9ad66ec81d3391a5ec34cd90 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Thu, 26 Mar 2020 15:07:39 -0700 Subject: [PATCH 307/668] update changelog --- CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 376ef4e7f6..f0829a83f7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,7 +11,6 @@ New features & improvements * gpu addon: privileged mode no longer required [#7149](https://github.com/kubernetes/minikube/pull/7149) * Add sch_tbf and extend filter ipset kernel module for bandwidth shaping [#7255](https://github.com/kubernetes/minikube/pull/7255) * Parse --disk-size and --memory sizes with binary suffixes [#7206](https://github.com/kubernetes/minikube/pull/7206) -* local search for the site [#7253](https://github.com/kubernetes/minikube/pull/7253) Bug Fixes From fc262fff0487374c7bb0ec145c7a189ba9123deb Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 26 Mar 2020 15:56:40 -0700 Subject: [PATCH 308/668] Add crictl to github none CI --- .github/workflows/main.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 1ce020cbbe..cf6ed4730b 100644 --- a/.github/workflows/main.yml +++ 
b/.github/workflows/main.yml @@ -274,6 +274,9 @@ jobs: sudo apt-get update -qq sudo apt-get -qq -y install conntrack sudo apt-get -qq -y install socat + VERSION="v1.17.0" + curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz + sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin - name: Install gopogh shell: bash run: | @@ -352,6 +355,9 @@ jobs: sudo apt-get update -qq sudo apt-get -qq -y install conntrack sudo apt-get -qq -y install socat + VERSION="v1.17.0" + curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz + sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin - name: Install gopogh shell: bash run: | From 479525e4b0f4c862b3cffcb83e563d389a98f1c0 Mon Sep 17 00:00:00 2001 From: minikube-bot Date: Thu, 26 Mar 2020 16:03:04 -0700 Subject: [PATCH 309/668] Update releases.json to include v1.9.0 --- deploy/minikube/releases.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/deploy/minikube/releases.json b/deploy/minikube/releases.json index bc2e1d5181..2bb7364fcf 100644 --- a/deploy/minikube/releases.json +++ b/deploy/minikube/releases.json @@ -1,4 +1,12 @@ [ + { + "name": "v1.9.0", + "checksums": { + "darwin": "2a074b0d842e3d9272444990374c6ffc51878c2d11c0434f54e15269b59593f9", + "linux": "81d77d1babe63be393e0a3204aac7825eb35e0fdf58ffefd9f66508a43864866", + "windows": "d11a957704c23670eac453a47897449a2aaab13b7dcd6424307f8932ac9f81bb" + } + }, { "name": "v1.8.2", "checksums": { From 323dba4bc9e23b632ce6ae368eaa3921bba84d30 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Thu, 26 Mar 2020 16:14:55 -0700 Subject: [PATCH 310/668] Update docs to reflect release of v1.9.0 --- site/config.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/site/config.toml b/site/config.toml index 
23f8db88b6..c241a41581 100644 --- a/site/config.toml +++ b/site/config.toml @@ -95,7 +95,7 @@ weight = 1 [params] copyright = "The Kubernetes Authors -- " # The latest release of minikube -latest_release = "1.8.1" +latest_release = "1.9.0" privacy_policy = "" From e3472b6da0ff94a553d5d8b817e53eec20d08f97 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 26 Mar 2020 16:23:53 -0700 Subject: [PATCH 311/668] Add files missing for FreeBSD compilation --- pkg/minikube/constants/constants_freebsd.go | 26 +++ pkg/minikube/driver/driver_freebsd.go | 32 +++ pkg/minikube/tunnel/route_freebsd.go | 168 ++++++++++++++ third_party/go9p/srv_pipe_freebsd.go | 41 ++++ third_party/go9p/ufs_freebsd.go | 239 ++++++++++++++++++++ 5 files changed, 506 insertions(+) create mode 100644 pkg/minikube/constants/constants_freebsd.go create mode 100644 pkg/minikube/driver/driver_freebsd.go create mode 100644 pkg/minikube/tunnel/route_freebsd.go create mode 100644 third_party/go9p/srv_pipe_freebsd.go create mode 100644 third_party/go9p/ufs_freebsd.go diff --git a/pkg/minikube/constants/constants_freebsd.go b/pkg/minikube/constants/constants_freebsd.go new file mode 100644 index 0000000000..77a47a765f --- /dev/null +++ b/pkg/minikube/constants/constants_freebsd.go @@ -0,0 +1,26 @@ +// +build linux, !gendocs + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package constants + +import ( + "k8s.io/client-go/util/homedir" +) + +// DefaultMountDir is the default mount dir +var DefaultMountDir = homedir.HomeDir() diff --git a/pkg/minikube/driver/driver_freebsd.go b/pkg/minikube/driver/driver_freebsd.go new file mode 100644 index 0000000000..21b8761649 --- /dev/null +++ b/pkg/minikube/driver/driver_freebsd.go @@ -0,0 +1,32 @@ +/* +Copyright 2019 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package driver + +import "os/exec" + +// supportedDrivers is a list of supported drivers on Darwin. +var supportedDrivers = []string{ + VirtualBox, +} + +func VBoxManagePath() string { + cmd := "VBoxManage" + if path, err := exec.LookPath(cmd); err == nil { + return path + } + return cmd +} diff --git a/pkg/minikube/tunnel/route_freebsd.go b/pkg/minikube/tunnel/route_freebsd.go new file mode 100644 index 0000000000..d88d5301ca --- /dev/null +++ b/pkg/minikube/tunnel/route_freebsd.go @@ -0,0 +1,168 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tunnel + +import ( + "fmt" + "net" + "os/exec" + "regexp" + "strings" + + "github.com/golang/glog" +) + +func (router *osRouter) EnsureRouteIsAdded(route *Route) error { + exists, err := isValidToAddOrDelete(router, route) + if err != nil { + return err + } + if exists { + return nil + } + + serviceCIDR := route.DestCIDR.String() + gatewayIP := route.Gateway.String() + + glog.Infof("Adding route for CIDR %s to gateway %s", serviceCIDR, gatewayIP) + command := exec.Command("sudo", "route", "-n", "add", serviceCIDR, gatewayIP) + glog.Infof("About to run command: %s", command.Args) + stdInAndOut, err := command.CombinedOutput() + message := fmt.Sprintf("%s", stdInAndOut) + re := regexp.MustCompile(fmt.Sprintf("add net (.*): gateway %s\n", gatewayIP)) + if !re.MatchString(message) { + return fmt.Errorf("error adding Route: %s, %d", message, len(strings.Split(message, "\n"))) + } + glog.Infof("%s", stdInAndOut) + + return err +} + +func (router *osRouter) Inspect(route *Route) (exists bool, conflict string, overlaps []string, err error) { + cmd := exec.Command("netstat", "-nr", "-f", "inet") + cmd.Env = append(cmd.Env, "LC_ALL=C") + stdInAndOut, err := cmd.CombinedOutput() + if err != nil { + err = fmt.Errorf("error running '%v': %s", cmd, err) + return + } + + rt := router.parseTable(stdInAndOut) + + exists, conflict, overlaps = rt.Check(route) + + return +} + +func (router *osRouter) parseTable(table []byte) routingTable { + t := routingTable{} + skip := true + for _, line := range strings.Split(string(table), "\n") { + // header + if strings.HasPrefix(line, "Destination") { + skip = false + continue + } + // don't care about the 0.0.0.0 routes + if skip || strings.HasPrefix(line, "default") { + continue + } + fields := strings.Fields(line) + + if len(fields) <= 2 { + continue + } + dstCIDRString := router.padCIDR(fields[0]) + gatewayIPString := 
fields[1] + gatewayIP := net.ParseIP(gatewayIPString) + + _, ipNet, err := net.ParseCIDR(dstCIDRString) + if err != nil { + glog.V(4).Infof("skipping line: can't parse CIDR from routing table: %s", dstCIDRString) + } else if gatewayIP == nil { + glog.V(4).Infof("skipping line: can't parse IP from routing table: %s", gatewayIPString) + } else { + tableLine := routingTableLine{ + route: &Route{ + DestCIDR: ipNet, + Gateway: gatewayIP, + }, + line: line, + } + t = append(t, tableLine) + } + } + + return t +} + +func (router *osRouter) padCIDR(origCIDR string) string { + s := "" + dots := 0 + slash := false + for i, c := range origCIDR { + if c == '.' { + dots++ + } + if c == '/' { + for dots < 3 { + s += ".0" + dots++ + } + slash = true + } + if i == len(origCIDR)-1 { + s += string(c) + bits := 32 - 8*(3-dots) + for dots < 3 { + s += ".0" + dots++ + } + if !slash { + s += fmt.Sprintf("/%d", bits) + } + } else { + s += string(c) + } + } + return s +} + +func (router *osRouter) Cleanup(route *Route) error { + glog.V(3).Infof("Cleaning up %s\n", route) + exists, err := isValidToAddOrDelete(router, route) + if err != nil { + return err + } + if !exists { + return nil + } + cmd := exec.Command("sudo", "route", "-n", "delete", route.DestCIDR.String()) + stdInAndOut, err := cmd.CombinedOutput() + if err != nil { + return err + } + msg := fmt.Sprintf("%s", stdInAndOut) + glog.V(4).Infof("%s", msg) + re := regexp.MustCompile("^delete net ([^:]*)$") + if !re.MatchString(msg) { + return fmt.Errorf("error deleting route: %s, %d", msg, len(strings.Split(msg, "\n"))) + } + return nil +} + + diff --git a/third_party/go9p/srv_pipe_freebsd.go b/third_party/go9p/srv_pipe_freebsd.go new file mode 100644 index 0000000000..453c52fec1 --- /dev/null +++ b/third_party/go9p/srv_pipe_freebsd.go @@ -0,0 +1,41 @@ +// Copyright 2009 The go9p Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package go9p + +import ( + "fmt" + "os" + "syscall" +) + +func (dir *pipeDir) dotu(path string, d os.FileInfo, upool Users, sysMode *syscall.Stat_t) { + u := upool.Uid2User(int(sysMode.Uid)) + g := upool.Gid2Group(int(sysMode.Gid)) + dir.Uid = u.Name() + if dir.Uid == "" { + dir.Uid = "none" + } + + dir.Gid = g.Name() + if dir.Gid == "" { + dir.Gid = "none" + } + dir.Muid = "none" + dir.Ext = "" + dir.Uidnum = uint32(u.Id()) + dir.Gidnum = uint32(g.Id()) + dir.Muidnum = NOUID + if d.Mode()&os.ModeSymlink != 0 { + var err error + dir.Ext, err = os.Readlink(path) + if err != nil { + dir.Ext = "" + } + } else if isBlock(d) { + dir.Ext = fmt.Sprintf("b %d %d", sysMode.Rdev>>24, sysMode.Rdev&0xFFFFFF) + } else if isChar(d) { + dir.Ext = fmt.Sprintf("c %d %d", sysMode.Rdev>>24, sysMode.Rdev&0xFFFFFF) + } +} diff --git a/third_party/go9p/ufs_freebsd.go b/third_party/go9p/ufs_freebsd.go new file mode 100644 index 0000000000..da9a10fae2 --- /dev/null +++ b/third_party/go9p/ufs_freebsd.go @@ -0,0 +1,239 @@ +// Copyright 2009 The go9p Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package go9p + +import ( + "fmt" + "os" + "os/user" + "path" + "strconv" + "strings" + "syscall" + "time" +) + +func atime(stat *syscall.Stat_t) time.Time { + return time.Unix(stat.Atimespec.Unix()) +} + +// IsBlock reports if the file is a block device +func isBlock(d os.FileInfo) bool { + stat := d.Sys().(*syscall.Stat_t) + return (stat.Mode & syscall.S_IFMT) == syscall.S_IFBLK +} + +// IsChar reports if the file is a character device +func isChar(d os.FileInfo) bool { + stat := d.Sys().(*syscall.Stat_t) + return (stat.Mode & syscall.S_IFMT) == syscall.S_IFCHR +} + +func dir2Qid(d os.FileInfo) *Qid { + var qid Qid + + qid.Path = d.Sys().(*syscall.Stat_t).Ino + qid.Version = uint32(d.ModTime().UnixNano() / 1000000) + qid.Type = dir2QidType(d) + + return &qid +} + +func dir2Dir(path string, d os.FileInfo, dotu bool, upool Users) (*Dir, error) { + if r := recover(); r != nil { + fmt.Print("stat failed: ", r) + return nil, &os.PathError{"dir2Dir", path, nil} + } + sysif := d.Sys() + if sysif == nil { + return nil, &os.PathError{"dir2Dir: sysif is nil", path, nil} + } + sysMode := sysif.(*syscall.Stat_t) + + dir := new(ufsDir) + dir.Qid = *dir2Qid(d) + dir.Mode = dir2Npmode(d, dotu) + dir.Atime = uint32(0 /*atime(sysMode).Unix()*/) + dir.Mtime = uint32(d.ModTime().Unix()) + dir.Length = uint64(d.Size()) + dir.Name = path[strings.LastIndex(path, "/")+1:] + + if dotu { + dir.dotu(path, d, upool, sysMode) + return &dir.Dir, nil + } + + unixUid := int(sysMode.Uid) + unixGid := int(sysMode.Gid) + dir.Uid = strconv.Itoa(unixUid) + dir.Gid = strconv.Itoa(unixGid) + + // BUG(akumar): LookupId will never find names for + // groups, as it only operates on user ids. + u, err := user.LookupId(dir.Uid) + if err == nil { + dir.Uid = u.Username + } + g, err := user.LookupId(dir.Gid) + if err == nil { + dir.Gid = g.Username + } + + /* For Akaros, we use the Muid as the link value. 
*/ + if *Akaros && (d.Mode()&os.ModeSymlink != 0) { + dir.Muid, err = os.Readlink(path) + if err == nil { + dir.Mode |= DMSYMLINK + } + } + return &dir.Dir, nil +} + +func (dir *ufsDir) dotu(path string, d os.FileInfo, upool Users, sysMode *syscall.Stat_t) { + u := upool.Uid2User(int(sysMode.Uid)) + g := upool.Gid2Group(int(sysMode.Gid)) + dir.Uid = u.Name() + if dir.Uid == "" { + dir.Uid = "none" + } + + dir.Gid = g.Name() + if dir.Gid == "" { + dir.Gid = "none" + } + dir.Muid = "none" + dir.Ext = "" + dir.Uidnum = uint32(u.Id()) + dir.Gidnum = uint32(g.Id()) + dir.Muidnum = NOUID + if d.Mode()&os.ModeSymlink != 0 { + var err error + dir.Ext, err = os.Readlink(path) + if err != nil { + dir.Ext = "" + } + } else if isBlock(d) { + dir.Ext = fmt.Sprintf("b %d %d", sysMode.Rdev>>24, sysMode.Rdev&0xFFFFFF) + } else if isChar(d) { + dir.Ext = fmt.Sprintf("c %d %d", sysMode.Rdev>>24, sysMode.Rdev&0xFFFFFF) + } +} + +func (u *Ufs) Wstat(req *SrvReq) { + fid := req.Fid.Aux.(*ufsFid) + err := fid.stat() + if err != nil { + req.RespondError(err) + return + } + + dir := &req.Tc.Dir + if dir.Mode != 0xFFFFFFFF { + mode := dir.Mode & 0777 + if req.Conn.Dotu { + if dir.Mode&DMSETUID > 0 { + mode |= syscall.S_ISUID + } + if dir.Mode&DMSETGID > 0 { + mode |= syscall.S_ISGID + } + } + e := os.Chmod(fid.path, os.FileMode(mode)) + if e != nil { + req.RespondError(toError(e)) + return + } + } + + uid, gid := NOUID, NOUID + if req.Conn.Dotu { + uid = dir.Uidnum + gid = dir.Gidnum + } + + // Try to find local uid, gid by name. + if (dir.Uid != "" || dir.Gid != "") && !req.Conn.Dotu { + uid, err = lookup(dir.Uid, false) + if err != nil { + req.RespondError(err) + return + } + + // BUG(akumar): Lookup will never find gids + // corresponding to group names, because + // it only operates on user names. 
+ gid, err = lookup(dir.Gid, true) + if err != nil { + req.RespondError(err) + return + } + } + + if uid != NOUID || gid != NOUID { + e := os.Chown(fid.path, int(uid), int(gid)) + if e != nil { + req.RespondError(toError(e)) + return + } + } + + if dir.Name != "" { + fmt.Printf("Rename %s to %s\n", fid.path, dir.Name) + // if first char is / it is relative to root, else relative to + // cwd. + var destpath string + if dir.Name[0] == '/' { + destpath = path.Join(u.Root, dir.Name) + fmt.Printf("/ results in %s\n", destpath) + } else { + fiddir, _ := path.Split(fid.path) + destpath = path.Join(fiddir, dir.Name) + fmt.Printf("rel results in %s\n", destpath) + } + err := os.Rename(fid.path, destpath) + fmt.Printf("rename %s to %s gets %v\n", fid.path, destpath, err) + if err != nil { + req.RespondError(toError(err)) + return + } + fid.path = destpath + } + + if dir.Length != 0xFFFFFFFFFFFFFFFF { + e := os.Truncate(fid.path, int64(dir.Length)) + if e != nil { + req.RespondError(toError(e)) + return + } + } + + // If either mtime or atime need to be changed, then + // we must change both. 
+ if dir.Mtime != ^uint32(0) || dir.Atime != ^uint32(0) { + mt, at := time.Unix(int64(dir.Mtime), 0), time.Unix(int64(dir.Atime), 0) + if cmt, cat := (dir.Mtime == ^uint32(0)), (dir.Atime == ^uint32(0)); cmt || cat { + st, e := os.Stat(fid.path) + if e != nil { + req.RespondError(toError(e)) + return + } + switch cmt { + case true: + mt = st.ModTime() + default: + // at = time.Time(0)//atime(st.Sys().(*syscall.Stat_t)) + } + } + // macOS filesystem st_mtime values are only accurate to the second + // this ensures, 9p will only write mtime to the second #1375 + e := os.Chtimes(fid.path, at.Truncate(time.Second), mt.Truncate(time.Second)) + if e != nil { + req.RespondError(toError(e)) + return + } + } + + req.RespondRwstat() +} From 5a3d4f2870a8d16f6fbcc3dbc939a94cdbd55a01 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 26 Mar 2020 17:01:30 -0700 Subject: [PATCH 312/668] Fix StartHost error formatting mistake, improve err msg --- pkg/minikube/node/start.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 5f7b131a6e..eede1db480 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -348,7 +348,8 @@ func startHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*hos out.T(out.Workaround, `Run: "{{.delete}}", then "{{.start}} --alsologtostderr -v=1" to try again with more logging`, out.V{"delete": mustload.ExampleCmd(cc.Name, "delete"), "start": mustload.ExampleCmd(cc.Name, "start")}) - exit.WithError("Unable to start VM after repeated tries. Please try {{'minikube delete' if possible", err) + drv := host.Driver.DriverName() + exit.WithError(fmt.Sprintf(`Failed to start %s %s. 
"%s" may fix it.`, drv, driver.MachineType(drv), mustload.ExampleCmd(cc.Name, "start")), err) return host, exists } From c9a6d8dcf40f674821bc5dc31ea6d224f33afbb0 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 26 Mar 2020 17:33:56 -0700 Subject: [PATCH 313/668] Improve display of host startup errors --- pkg/minikube/exit/exit.go | 6 +++--- pkg/minikube/node/start.go | 10 +++------- pkg/minikube/problem/problem.go | 7 ++++++- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/pkg/minikube/exit/exit.go b/pkg/minikube/exit/exit.go index 4ed989d2a6..221001f86e 100644 --- a/pkg/minikube/exit/exit.go +++ b/pkg/minikube/exit/exit.go @@ -61,16 +61,16 @@ func WithCodeT(code int, format string, a ...out.V) { func WithError(msg string, err error) { p := problem.FromError(err, runtime.GOOS) if p != nil { - WithProblem(msg, p) + WithProblem(msg, err, p) } displayError(msg, err) os.Exit(Software) } // WithProblem outputs info related to a known problem and exits. -func WithProblem(msg string, p *problem.Problem) { +func WithProblem(msg string, err error, p *problem.Problem) { out.ErrT(out.Empty, "") - out.FatalT(msg) + out.ErrT(out.FailureType, "[{{.id}}] {{.msg}} {{.error}}", out.V{"msg": msg, "id": p.ID, "error": p.Err}) p.Display() if p.ShowIssueLink { out.ErrT(out.Empty, "") diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index eede1db480..e983e43c83 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -48,7 +48,6 @@ import ( "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/logs" "k8s.io/minikube/pkg/minikube/machine" - "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/proxy" "k8s.io/minikube/pkg/util" @@ -344,12 +343,9 @@ func startHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*hos return host, exists } - out.T(out.FailureType, "StartHost failed again: {{.error}}", out.V{"error": err}) - out.T(out.Workaround, 
`Run: "{{.delete}}", then "{{.start}} --alsologtostderr -v=1" to try again with more logging`, - out.V{"delete": mustload.ExampleCmd(cc.Name, "delete"), "start": mustload.ExampleCmd(cc.Name, "start")}) - - drv := host.Driver.DriverName() - exit.WithError(fmt.Sprintf(`Failed to start %s %s. "%s" may fix it.`, drv, driver.MachineType(drv), mustload.ExampleCmd(cc.Name, "start")), err) + // Don't use host.Driver to avoid nil pointer deref + drv := cc.Driver + exit.WithError(fmt.Sprintf(`%s %s start failed`, drv, driver.MachineType(drv)), err) return host, exists } diff --git a/pkg/minikube/problem/problem.go b/pkg/minikube/problem/problem.go index 1de611b0a7..d5465a1830 100644 --- a/pkg/minikube/problem/problem.go +++ b/pkg/minikube/problem/problem.go @@ -57,7 +57,6 @@ type match struct { // Display problem metadata to the console func (p *Problem) Display() { - out.ErrT(out.FailureType, "Error: [{{.id}}] {{.error}}", out.V{"id": p.ID, "error": p.Err}) out.ErrT(out.Tip, "Suggestion: {{.advice}}", out.V{"advice": translate.T(p.Advice)}) if p.URL != "" { out.ErrT(out.Documentation, "Documentation: {{.url}}", out.V{"url": p.URL}) @@ -65,6 +64,12 @@ func (p *Problem) Display() { if len(p.Issues) == 0 { return } + + if len(p.Issues) == 1 { + out.ErrT(out.Issues, "Related issue: {{.url}}", out.V{"url": fmt.Sprintf("%s/%d", issueBase, p.Issues[0])}) + return + } + out.ErrT(out.Issues, "Related issues:") issues := p.Issues if len(issues) > 3 { From f00ac2c05a08a91e09757e8eb85b7bdc3359ff16 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 26 Mar 2020 17:36:05 -0700 Subject: [PATCH 314/668] Fix null deref in start host err --- pkg/minikube/node/start.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index eede1db480..44ff9bb1cc 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -348,7 +348,7 @@ func startHost(api libmachine.API, cc config.ClusterConfig, n config.Node) 
(*hos out.T(out.Workaround, `Run: "{{.delete}}", then "{{.start}} --alsologtostderr -v=1" to try again with more logging`, out.V{"delete": mustload.ExampleCmd(cc.Name, "delete"), "start": mustload.ExampleCmd(cc.Name, "start")}) - drv := host.Driver.DriverName() + drv := cc.Driver exit.WithError(fmt.Sprintf(`Failed to start %s %s. "%s" may fix it.`, drv, driver.MachineType(drv), mustload.ExampleCmd(cc.Name, "start")), err) return host, exists } From 2d825d649831bedbef0c6eafb03cb1cee9d4fa2d Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Thu, 26 Mar 2020 18:04:49 -0700 Subject: [PATCH 315/668] translations facelift --- pkg/minikube/extract/extract.go | 16 +- pkg/minikube/translate/translate.go | 20 ++- .../en/docs/Contributing/translations.md | 84 ++++++++- translations/de.json | 146 +++++++++------- translations/es.json | 145 ++++++++------- translations/fr.json | 165 +++++++++--------- translations/ja.json | 144 ++++++++------- translations/ko.json | 117 +++++++++---- translations/pl.json | 140 ++++++++------- translations/zh-CN.json | 119 ++++++++----- 10 files changed, 673 insertions(+), 423 deletions(-) diff --git a/pkg/minikube/extract/extract.go b/pkg/minikube/extract/extract.go index 8453d53ea5..584acd827e 100644 --- a/pkg/minikube/extract/extract.go +++ b/pkg/minikube/extract/extract.go @@ -29,6 +29,10 @@ import ( "strconv" "strings" + // initflag must be imported before any other minikube pkg. + // Fix for https://github.com/kubernetes/minikube/issues/4866 + _ "k8s.io/minikube/pkg/initflag" + "github.com/golang-collections/collections/stack" "github.com/pkg/errors" "k8s.io/minikube/pkg/util/lock" @@ -45,6 +49,7 @@ var blacklist = []string{ "env {{.docker_env}}", "\\n", "==\u003e {{.name}} \u003c==", + "- {{.profile}}", } // ErrMapFile is a constant to refer to the err_map file, which contains the Advice strings. 
@@ -450,14 +455,17 @@ func writeStringsToFiles(e *state, output string) error { return nil } fmt.Printf("Writing to %s\n", filepath.Base(path)) - var currentTranslations map[string]interface{} + currentTranslations := make(map[string]interface{}) f, err := ioutil.ReadFile(path) if err != nil { return errors.Wrap(err, "reading translation file") } - err = json.Unmarshal(f, ¤tTranslations) - if err != nil { - return errors.Wrap(err, "unmarshalling current translations") + // Unmarhsal nonempty files + if len(f) > 0 { + err = json.Unmarshal(f, ¤tTranslations) + if err != nil { + return errors.Wrap(err, "unmarshalling current translations") + } } // Make sure to not overwrite already translated strings diff --git a/pkg/minikube/translate/translate.go b/pkg/minikube/translate/translate.go index 960352da82..4a6092515a 100644 --- a/pkg/minikube/translate/translate.go +++ b/pkg/minikube/translate/translate.go @@ -18,6 +18,8 @@ package translate import ( "encoding/json" + "fmt" + "path" "strings" "github.com/cloudfoundry-attic/jibber_jabber" @@ -73,11 +75,23 @@ func DetermineLocale() { } // Load translations for preferred language into memory. - translationFile := "translations/" + preferredLanguage.String() + ".json" + p := preferredLanguage.String() + translationFile := path.Join("translations", fmt.Sprintf("%s.json", p)) t, err := Asset(translationFile) if err != nil { - glog.Infof("Failed to load translation file for %s: %v", preferredLanguage.String(), err) - return + // Attempt to find a more broad locale, e.g. fr instead of fr-FR. 
+ if strings.Contains(p, "-") { + p = strings.Split(p, "-")[0] + translationFile := path.Join("translations", fmt.Sprintf("%s.json", p)) + t, err = Asset(translationFile) + if err != nil { + glog.Infof("Failed to load translation file for %s: %v", p, err) + return + } + } else { + glog.Infof("Failed to load translation file for %s: %v", preferredLanguage.String(), err) + return + } } err = json.Unmarshal(t, &Translations) diff --git a/site/content/en/docs/Contributing/translations.md b/site/content/en/docs/Contributing/translations.md index 40f7cfdee1..aa1f8b8c8d 100644 --- a/site/content/en/docs/Contributing/translations.md +++ b/site/content/en/docs/Contributing/translations.md @@ -8,14 +8,82 @@ description: > All translations are stored in the top-level `translations` directory. +### Adding a New Language +* Add a new json file in the translations directory with the locale code of the language you want to add + translations for, e.g. fr for French. + ``` + ~/minikube$ touch translations/ar.json + ~/minikube$ ls translations/ + de.json es.json fr.json ja.json ko.json pl.json zh-CN.json + ``` +* Run `make extract` from root to populate that file with the strings to translate in json + form. + ``` + ~/minikube$ make extract + go run cmd/extract/extract.go + Compiling translation strings... + Writing to de.json + Writing to es.json + Writing to fr.json + Writing to ja.json + Writing to ko.json + Writing to pl.json + Writing to zh-CN.json + Done! + ``` +* Add translated strings as the value of the map where the English phrase is the key. 
+ * The file will be json file with all of the English phrases as the keys of a map + ``` + ~/minikube$ head translations/fr.json + { + "\"The '{{.minikube_addon}}' addon is disabled": "", + "\"{{.machineName}}\" does not exist, nothing to stop": "", + "\"{{.name}}\" profile does not exist, trying anyways.": "", + "'none' driver does not support 'minikube docker-env' command": "", + "'none' driver does not support 'minikube mount' command": "", + "'none' driver does not support 'minikube podman-env' command": "", + "'none' driver does not support 'minikube ssh' command": "", + "'{{.driver}}' driver reported an issue: {{.error}}": "", + ``` + * Add the translations as the values of the map, keeping in mind that anything in double braces `{{}}` are variable names describing what gets injected and should not be translated. + ``` + ~/minikube$ vi translations/ar.json + { + "\"The '{{.minikube_addon}}' addon is disabled": "", + "\"{{.machineName}}\" does not exist, nothing to stop": "\""{{.machineName}} n'exist pas, rien a arrêter.", + "\"{{.name}}\" profile does not exist, trying anyways.": "", + "'none' driver does not support 'minikube docker-env' command": "", + "'none' driver does not support 'minikube mount' command": "", + "'none' driver does not support 'minikube podman-env' command": "", + "'none' driver does not support 'minikube ssh' command": "", + "'{{.driver}}' driver reported an issue: {{.error}}": "", + ``` + ### Adding Translations To an Existing Language * Run `make extract` to make sure all strings are up to date -* Add translated strings to the appropriate json files in the 'translations' - directory. +* Edit the appropriate json file in the 'translations' directory, in the same way described above. -### Adding a New Language -* Add a new json file with the locale code of the language you want to add - translations for, e.g. en for English. -* Run `make extract` to populate that file with the strings to translate in json - form. 
-* Add translations to as many strings as you'd like. +### Testing translations +* Once you have all the translations you want, save the file and rebuild the minikube from scratch to pick up your new translations: + ``` + ~/minikube$ make clean + rm -rf ./out + rm -f pkg/minikube/assets/assets.go + rm -f pkg/minikube/translate/translations.go + rm -rf ./vendor + ~/minikube$ make + ``` + Note: the clean is required to regenerate the embedded `translations.go` file + +* You now have a fresh minikube binary in the `out` directory. If your system locale is that of the language you added translations for, a simple `out/minikube start` will work as a test, assuming you translated phrases from `minikube start`. You can use whatever command you'd like in that way. + +* If you have a different system locale, you can override the printed language using the LANG environment variable: + ``` + ~/minikube$ LANG=fr out/minikube start + 😄 minikube v1.9.0-beta.2 sur Darwin 10.14.6 + ✨ Choix automatique du driver hyperkit + 🔥 Création de VM hyperkit (CPUs=2, Mémoire=4000MB, Disque=20000MB)... + 🐳 Préparation de Kubernetes v1.18.0 sur Docker 19.03.8... + 🌟 Installation des addons: default-storageclass, storage-provisioner + 🏄 Terminé ! kubectl est maintenant configuré pour utiliser "minikube". 
+ ``` diff --git a/translations/de.json b/translations/de.json index 6f179495a3..248328d176 100644 --- a/translations/de.json +++ b/translations/de.json @@ -1,16 +1,12 @@ { "\"The '{{.minikube_addon}}' addon is disabled": "", - "\"{{.name}}\" profile does not exist": "", + "\"{{.machineName}}\" does not exist, nothing to stop": "", "\"{{.name}}\" profile does not exist, trying anyways.": "", - "\"{{.node_name}}\" stopped.": "", - "\"{{.profile_name}}\" does not exist, nothing to stop": "", - "\"{{.profile_name}}\" host does not exist, unable to show an IP": "", "'none' driver does not support 'minikube docker-env' command": "", "'none' driver does not support 'minikube mount' command": "", "'none' driver does not support 'minikube podman-env' command": "", "'none' driver does not support 'minikube ssh' command": "", "'{{.driver}}' driver reported an issue: {{.error}}": "", - "'{{.profile}}' is not running": "", "- {{.profile}}": "", "A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "", "A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "", @@ -32,12 +28,11 @@ "Adds a node to the given cluster config, and starts it.": "", "Adds a node to the given cluster.": "", "Advanced Commands:": "", - "After minikube restart the dockerd ports might have changed. To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Aliases": "", "Allow user prompts for more information": "", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "Alternatives Bild-Repository zum Abrufen von Docker-Images. 
Dies ist hilfreich, wenn Sie nur eingeschränkten Zugriff auf gcr.io haben. Stellen Sie \\\"auto\\\" ein, dann wählt minikube eins für sie aus. Nutzer vom chinesischen Festland können einen lokalen gcr.io-Mirror wie registry.cn-hangzhou.aliyuncs.com/google_containers verwenden.", "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)": "Größe des der minikube-VM zugewiesenen Arbeitsspeichers (Format: \u003cNummer\u003e [\u003cEinheit\u003e], wobei Einheit = b, k, m oder g)", - "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", + "Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", "Amount of time to wait for a service in seconds": "", "Amount of time to wait for service in seconds": "", "Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "", @@ -48,6 +43,7 @@ "Because you are using docker driver on Mac, the terminal needs to be open to run it.": "", "Bind Address: {{.Address}}": "", "Block until the apiserver is servicing API requests": "", + "Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "", "Cannot find directory {{.path}} for mount": "", "Cannot use both --output and --format options": "", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "", @@ -66,9 +62,9 @@ "Could not process error from failed deletion": "", "Could not process errors from failed deletion": "", "Country code of the image mirror to be used. Leave empty to use the global one. 
For Chinese mainland users, set it to cn.": "Ländercode des zu verwendenden Image Mirror. Lassen Sie dieses Feld leer, um den globalen zu verwenden. Nutzer vom chinesischen Festland stellen cn ein.", - "Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", + "Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", "Creating mount {{.name}} ...": "Bereitstellung {{.name}} wird erstellt...", - "Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", + "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", "DEPRECATED, use `driver` instead.": "", "Default group id used for the mount": "", "Default user id used for the mount": "", @@ -97,10 +93,9 @@ "Done! kubectl is now configured to use \"{{.name}}\"": "", "Done! kubectl is now configured to use \"{{.name}}__1": "Fertig! kubectl ist jetzt für die Verwendung von \"{{.name}}\" konfiguriert", "Download complete!": "Download abgeschlossen!", + "Downloading Kubernetes {{.version}} preload ...": "", "Downloading VM boot image ...": "", "Downloading driver {{.driver}}:": "", - "Downloading preloaded images tarball for k8s {{.version}} ...": "", - "Downloading {{.name}} {{.version}}": "", "ERROR creating `registry-creds-acr` secret": "", "ERROR creating `registry-creds-dpr` secret": "", "ERROR creating `registry-creds-ecr` secret: {{.error}}": "", @@ -109,7 +104,6 @@ "Enable addons. 
see `minikube addons list` for a list of valid addon names.": "", "Enable experimental NVIDIA GPU support in minikube": "Experimentellen NVIDIA GPU-Support in minikube aktivieren", "Enable host resolver for NAT DNS requests (virtualbox driver only)": "Host Resolver für NAT DNS-Anfragen aktivieren (nur Virtualbox-Treiber)", - "Enable istio needs {{.minMem}} MB of memory and {{.minCpus}} CPUs.": "", "Enable proxy for NAT DNS requests (virtualbox driver only)": "Proxy für NAT-DNS-Anforderungen aktivieren (nur Virtualbox-Treiber)", "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\": "Standard-CNI-Plugin-in (/etc/cni/net.d/k8s.conf) aktivieren. Wird in Verbindung mit \"--network-plugin = cni\" verwendet", "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\\".": "", @@ -131,45 +125,29 @@ "Error finding port for mount": "", "Error generating set output": "", "Error generating unset output": "", - "Error getting IP": "", - "Error getting client": "", - "Error getting client: {{.error}}": "", - "Error getting cluster": "", "Error getting cluster bootstrapper": "", "Error getting cluster config": "", - "Error getting config": "", - "Error getting control plane": "", "Error getting host": "", - "Error getting host IP": "", - "Error getting host status": "", - "Error getting machine logs": "", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "", "Error getting primary control plane": "", - "Error getting primary cp": "", - "Error getting service status": "", "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "", "Error getting ssh client": "", "Error getting the host IP address to use from within the VM": "", - "Error host driver ip status": "", "Error killing mount process": "", - "Error loading api": "", - "Error loading profile config": "", "Error loading profile config: 
{{.error}}": "", "Error loading profile {{.name}}: {{.error}}": "Fehler beim Laden des Profils {{.name}}: {{.error}}", "Error opening service": "", "Error parsing Driver version: {{.error}}": "Fehler beim Parsen der Driver-Version: {{.error}}", "Error parsing minikube version: {{.error}}": "Fehler beim Parsen der minikube-Version: {{.error}}", "Error reading {{.path}}: {{.error}}": "", - "Error retrieving node": "", "Error starting cluster": "", "Error starting mount": "", - "Error starting node": "", "Error while setting kubectl current context : {{.error}}": "", "Error writing mount pid": "", - "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}\"": "", "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. 
Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "Fehler: Sie haben Kubernetes v{{.new}} ausgewählt, aber auf dem vorhandenen Cluster für Ihr Profil wird Kubernetes v{{.old}} ausgeführt. Zerstörungsfreie Downgrades werden nicht unterstützt. Sie können jedoch mit einer der folgenden Optionen fortfahren:\n* Erstellen Sie den Cluster mit Kubernetes v{{.new}} neu: Führen Sie \"minikube delete {{.profile}}\" und dann \"minikube start {{.profile}} - kubernetes-version = {{.new}}\" aus.\n* Erstellen Sie einen zweiten Cluster mit Kubernetes v{{.new}}: Führen Sie \"minikube start -p \u003cnew name\u003e --kubernetes-version = {{.new}}\" aus.\n* Verwenden Sie den vorhandenen Cluster mit Kubernetes v {{.old}} oder höher: Führen Sie \"minikube start {{.profile}} --kubernetes-version = {{.old}}\" aus.", "Error: [{{.id}}] {{.error}}": "", "Examples": "", + "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "", "Exiting": "Wird beendet", "Exiting.": "", "External Adapter on which external switch will be created if no external switch is found. 
(hyperv driver only)": "", @@ -177,38 +155,38 @@ "Failed to cache ISO": "", "Failed to cache and load images": "", "Failed to cache binaries": "", + "Failed to cache images": "", "Failed to cache images to tar": "", "Failed to cache kubectl": "", "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}": "Fehler beim Ändern der Berechtigungen für {{.minikube_dir_path}}: {{.error}}", - "Failed to check if machine exists": "", "Failed to check main repository and mirrors for images for images": "", "Failed to delete cluster: {{.error}}": "Fehler beim Löschen des Clusters: {{.error}}", "Failed to delete cluster: {{.error}}__1": "Fehler beim Löschen des Clusters: {{.error}}", "Failed to delete images": "", "Failed to delete images from config": "", - "Failed to delete node {{.name}}": "", "Failed to enable container runtime": "", "Failed to generate config": "", + "Failed to get API Server URL": "", "Failed to get bootstrapper": "", "Failed to get command runner": "", - "Failed to get driver URL": "", "Failed to get image map": "", "Failed to get machine client": "", "Failed to get service URL: {{.error}}": "", "Failed to kill mount process: {{.error}}": "Fehler beim Beenden des Bereitstellungsprozesses: {{.error}}", "Failed to list cached images": "", + "Failed to parse kubernetes version": "", "Failed to reload cached images": "", "Failed to save config": "", "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}": "NO_PROXY Env konnte nicht festgelegt werden. Benutzen Sie `export NO_PROXY = $ NO_PROXY, {{. Ip}}", "Failed to set NO_PROXY Env. 
Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "", "Failed to setup certs": "", "Failed to setup kubeconfig": "", - "Failed to start node {{.name}}": "", "Failed to stop node {{.name}}": "", "Failed to update cluster": "", "Failed to update config": "", "Failed unmount: {{.error}}": "", "File permissions used for the mount": "", + "Filter to use only VM Drivers": "", "Flags": "", "Follow": "", "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "Für beste Ergebnisse installieren Sie kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/", @@ -218,13 +196,16 @@ "Force minikube to perform possibly dangerous operations": "minikube zwingen, möglicherweise gefährliche Operationen durchzuführen", "Found network options:": "Gefundene Netzwerkoptionen:", "Found {{.number}} invalid profile(s) !": "", + "Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "", + "Generate unable to parse memory '{{.memory}}': {{.error}}": "", "Gets the kubernetes URL(s) for the specified service in your local cluster": "", "Gets the kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time.": "", "Gets the logs of the running instance, used for debugging minikube, not user code.": "", "Gets the status of a local kubernetes cluster": "", "Gets the status of a local kubernetes cluster.\n\tExit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left.\n\tEg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK)": "", "Gets the value of PROPERTY_NAME from the minikube config file": "", - "Getting machine config failed": "", + "Getting bootstrapper": "", + "Getting primary control plane": "", "Global Flags": "", "Go template format string for the cache list output. 
The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate": "", "Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "", @@ -235,6 +216,7 @@ "Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "", + "If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "", "If set, install addons. Defaults to true.": "", "If set, pause all namespaces": "", "If set, unpause all namespaces": "", @@ -251,8 +233,9 @@ "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.": "Unsichere Docker-Registrys, die an den Docker-Daemon übergeben werden. Der CIDR-Bereich des Standarddienstes wird automatisch hinzugefügt.", "Install VirtualBox, or select an alternative value for --driver": "", "Install the latest hyperkit binary, and run 'minikube delete'": "", - "Invalid size passed in argument: {{.error}}": "", "IsEnabled failed": "", + "Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "", + "Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "", "Kill the mount process spawned by minikube start": "", "Kubernetes {{.new}} is now available. 
If you would like to upgrade, specify: --kubernetes-version={{.new}}": "", "Kubernetes {{.version}} is not supported by this release of minikube": "", @@ -267,7 +250,7 @@ "Local folders to share with Guest via NFS mounts (hyperkit driver only)": "Lokale Ordner, die über NFS-Bereitstellungen für Gast freigegeben werden (nur Hyperkit-Treiber)", "Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "Speicherort des VPNKit-Sockets, der für das Netzwerk verwendet wird. Wenn leer, wird Hyperkit VPNKitSock deaktiviert. Wenn 'auto' die Docker for Mac VPNKit-Verbindung verwendet, wird andernfalls der angegebene VSock verwendet (nur Hyperkit-Treiber).", "Location of the minikube iso": "Speicherort der minikube-ISO", - "Location of the minikube iso.": "", + "Locations to fetch the minikube ISO from.": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "", "Message Size: {{.size}}": "", @@ -285,15 +268,18 @@ "NOTE: This process must stay alive for the mount to be accessible ...": "", "Networking and Connectivity Commands:": "", "No minikube profile was found. 
You can create one using `minikube start`.": "", + "Node \"{{.node_name}}\" stopped.": "", "Node may be unable to resolve external DNS records": "", "Node operations": "", "Node {{.name}} was successfully deleted.": "", + "Node {{.nodeName}} does not exist.": "", + "Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "", "None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "Keines der bekannten Repositories an Ihrem Standort ist zugänglich. {{.image_repository_name}} wird als Fallback verwendet.", "None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag": "Keines der bekannten Repositories ist zugänglich. 
Erwägen Sie, ein alternatives Image-Repository mit der Kennzeichnung --image-repository anzugeben", "Not passing {{.name}}={{.value}} to docker env.": "", - "Noticed that you are using minikube docker-env:": "", + "Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "", + "Number of CPUs allocated to Kubernetes.": "", "Number of CPUs allocated to the minikube VM": "Anzahl der CPUs, die der minikube-VM zugeordnet sind", - "Number of CPUs allocated to the minikube VM.": "", "Number of lines back to go within the log": "", "OS release is {{.pretty_name}}": "", "Open the addons URL with https instead of http": "", @@ -314,6 +300,7 @@ "Please install the minikube hyperkit VM driver, or select an alternative --driver": "", "Please install the minikube kvm2 VM driver, or select an alternative --driver": "", "Please make sure the service you are looking for is deployed or is in the correct namespace.": "", + "Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "", "Please upgrade the '{{.driver_executable}}'. {{.documentation_url}}": "Aktualisieren Sie '{{.driver_executable}}'. {{.documentation_url}}", "Populates the specified folder with documentation in markdown about minikube": "", @@ -327,10 +314,10 @@ "Profile gets or sets the current minikube profile": "", "Profile name \"{{.profilename}}\" is minikube keyword. 
To delete profile use command minikube delete -p \u003cprofile name\u003e": "", "Provide VM UUID to restore MAC address (hyperkit driver only)": "Geben Sie die VM-UUID an, um die MAC-Adresse wiederherzustellen (nur Hyperkit-Treiber)", + "Pulling base image ...": "", "Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "", "Rebuild libvirt with virt-network support": "", "Received {{.name}} signal": "", - "Reconfiguring existing host ...": "", "Registry mirrors to pass to the Docker daemon": "Registry-Mirror, die an den Docker-Daemon übergeben werden", "Reinstall VirtualBox and reboot. Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/": "", "Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "", @@ -341,7 +328,10 @@ "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "", "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "Die angeforderte Festplattengröße {{.requested_size}} liegt unter dem Mindestwert von {{.minimum_size}}.", "Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "Die angeforderte Speicherzuordnung ({{.memory}} MB) ist geringer als die Standardspeicherzuordnung von {{.default_memorysize}} MB. Beachten Sie, dass minikube möglicherweise nicht richtig funktioniert oder unerwartet abstürzt.", + "Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. 
Kubernetes may crash unexpectedly.": "", "Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "Die angeforderte Speicherzuweisung {{.requested_size}} liegt unter dem zulässigen Mindestwert von {{.minimum_size}}.", + "Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "", + "Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "", "Retrieve the ssh identity key path of the specified cluster": "", "Retrieve the ssh identity key path of the specified cluster.": "", "Retrieves the IP address of the running cluster": "", @@ -354,8 +344,10 @@ "Run minikube from the C: drive.": "", "Run the kubernetes client, download it if necessary. Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "", "Run the minikube command as an Administrator": "", + "Run: \"{{.delete}}\", then \"{{.start}} --alsologtostderr -v=1\" to try again with more logging": "", "Run: 'chmod 600 $HOME/.kube/config'": "", "Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", + "Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. 
Or list out all the services using 'minikube service list'": "", "Set failed": "", "Set flag to delete all profiles": "", "Set this flag to delete the '.minikube' folder from your user directory.": "", @@ -381,8 +373,8 @@ "Specify the 9p version that the mount should use": "", "Specify the ip that the mount should be setup on": "", "Specify the mount filesystem type (supported types: 9p)": "", - "Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "", - "Starting node": "", + "StartHost failed again: {{.error}}": "", + "StartHost failed, but will try again: {{.error}}": "", "Starting tunnel for service {{.service}}.": "", "Starts a local kubernetes cluster": "Startet einen lokalen Kubernetes-Cluster", "Starts a node.": "", @@ -395,7 +387,6 @@ "Successfully added {{.name}} to {{.cluster}}!": "", "Successfully deleted all profiles": "", "Successfully mounted {{.sourcePath}} to {{.destinationPath}}": "", - "Successfully powered off Hyper-V. minikube driver -- {{.driver}}": "", "Successfully purged minikube directory located at - [{{.minikubeDirectory}}]": "", "Suggestion: {{.advice}}": "", "Suggestion: {{.fix}}": "", @@ -427,12 +418,16 @@ "The cluster dns domain name used in the kubernetes cluster": "Der DNS-Domänenname des Clusters, der im Kubernetes-Cluster verwendet wird", "The container runtime to be used (docker, crio, containerd)": "Die zu verwendende Container-Laufzeit (Docker, Crio, Containerd)", "The container runtime to be used (docker, crio, containerd).": "", + "The control plane for \"{{.name}}\" is paused!": "", + "The control plane node \"{{.name}}\" does not exist.": "", + "The control plane node is not running (state={{.state}})": "", + "The control plane node must be running for this command": "", "The cri socket path to be used": "Der zu verwendende Cri-Socket-Pfad", "The cri socket path to be used.": "", - "The docker service within '{{.profile}}' is not active": "", + "The docker service within '{{.name}}' is not active": "", + 
"The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "", "The driver '{{.driver}}' is not supported on {{.os}}": "Der Treiber '{{.driver}}' wird auf {{.os}} nicht unterstützt", - "The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "", - "The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the \"{{.driver}}\" driver.": "", + "The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "", "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "Der Name des virtuellen Hyperv-Switch. Standardmäßig zuerst gefunden. (nur Hyperv-Treiber)", "The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "", "The initial time interval for each check that wait performs in seconds": "", @@ -444,10 +439,14 @@ "The name of the node to delete": "", "The name of the node to start": "", "The node to get logs from. Defaults to the primary control plane.": "", + "The node to ssh into. Defaults to the primary control plane.": "", + "The none driver is not compatible with multi-node clusters.": "", + "The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "", "The number of bytes to use for 9p packet payload": "", + "The number of nodes to spin up. Defaults to 1.": "", "The output format. 
One of 'json', 'table'": "", "The path on the file system where the docs in markdown need to be saved": "", - "The podman service within '{{.profile}}' is not active": "", + "The podman service within '{{.cluster}}' is not active": "", "The service namespace": "", "The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "", "The services namespace": "", @@ -456,19 +455,24 @@ "The value passed to --format is invalid: {{.error}}": "", "The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "", "The {{.driver_name}} driver should not be used with root privileges.": "Der Treiber {{.driver_name}} sollte nicht mit Root-Rechten verwendet werden.", + "There is no local cluster named \"{{.cluster}}\"": "", "There's a new version for '{{.driver_executable}}'. Please consider upgrading. {{.documentation_url}}": "Es gibt eine neue Version für '{{.driver_executable}}'. Bitte erwägen Sie ein Upgrade. {{.documentation_url}}", "These changes will take effect upon a minikube delete and then a minikube start": "", "This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "", "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "Dies kann auch automatisch erfolgen, indem Sie die env var CHANGE_MINIKUBE_NONE_USER = true setzen", + "This control plane is not running! 
(state={{.state}})": "", + "This is unusual - you may want to investigate using \"{{.command}}\"": "", "This will keep the existing kubectl context and will create a minikube context.": "Dadurch wird der vorhandene Kubectl-Kontext beibehalten und ein minikube-Kontext erstellt.", "This will start the mount daemon and automatically mount files into minikube": "Dadurch wird der Mount-Daemon gestartet und die Dateien werden automatisch in minikube geladen", "This will start the mount daemon and automatically mount files into minikube.": "", + "Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "", "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "Tipp: Um diesen Root-Cluster zu entfernen, führen Sie Folgendes aus: sudo {{.cmd}} delete", "To connect to this cluster, use: kubectl --context={{.name}}": "Verwenden Sie zum Herstellen einer Verbindung zu diesem Cluster: kubectl --context = {{.name}}", "To connect to this cluster, use: kubectl --context={{.name}}__1": "Verwenden Sie zum Herstellen einer Verbindung zu diesem Cluster: kubectl --context = {{.name}}", "To connect to this cluster, use: kubectl --context={{.profile_name}}": "", "To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "", - "To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "", + "To fix this, run: {{.command}}": "", + "To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "", "To see addons list for other profiles use: `minikube addons -p name list`": "", "To start minikube with HyperV Powershell must be in your PATH`": "", "To use kubectl or minikube commands as your own user, you may need to relocate them. 
For example, to overwrite your own settings, run:": "Möglicherweise müssen Sie Kubectl- oder minikube-Befehle verschieben, um sie als eigenen Nutzer zu verwenden. Um beispielsweise Ihre eigenen Einstellungen zu überschreiben, führen Sie aus:", @@ -478,24 +482,31 @@ "Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "Unable to enable dashboard": "", "Unable to fetch latest version info": "", + "Unable to find control plane": "", "Unable to generate docs": "", "Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", "Unable to get VM IP address": "", "Unable to get addon status for {{.name}}: {{.error}}": "", "Unable to get bootstrapper: {{.error}}": "Bootstrapper kann nicht abgerufen werden: {{.error}}", + "Unable to get command runner": "", + "Unable to get control plane status: {{.error}}": "", "Unable to get current user": "", + "Unable to get driver IP": "", + "Unable to get machine status": "", "Unable to get runtime": "", - "Unable to get the status of the {{.name}} cluster.": "", "Unable to kill mount process: {{.error}}": "", "Unable to load cached images from config file.": "Zwischengespeicherte Bilder können nicht aus der Konfigurationsdatei geladen werden.", "Unable to load cached images: {{.error}}": "", "Unable to load config: {{.error}}": "Konfig kann nicht geladen werden: {{.error}}", + "Unable to load host": "", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "\"{{.Kubernetes_version}}\" kann nicht geparst werden: {{.error}}", "Unable to parse default Kubernetes version from constants: {{.error}}": "", + "Unable to parse memory '{{.memory}}': {{.error}}": "", "Unable to parse oldest Kubernetes version from constants: {{.error}}": "", "Unable to pull images, which may be OK: {{.error}}": "Bilder können nicht abgerufen werden, was möglicherweise kein Problem darstellt: 
{{.error}}", - "Unable to remove machine directory: %v": "", - "Unable to start VM. Please investigate and run 'minikube delete' if possible": "", + "Unable to remove machine directory": "", + "Unable to restart cluster, will reset it: {{.error}}": "", + "Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "", "Unable to stop VM": "", "Unable to update {{.driver}} driver: {{.error}}": "", "Unable to verify SSH connectivity: {{.error}}. Will retry...": "", @@ -506,6 +517,8 @@ "Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "", "Unset variables instead of setting them": "", "Update server returned an empty list": "", + "Updating node": "", + "Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "", "Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "", "Upgrading from Kubernetes {{.old}} to {{.new}}": "Upgrade von Kubernetes {{.old}} auf {{.new}}", "Usage": "", @@ -526,11 +539,11 @@ "Userspace file server:": "", "Using image repository {{.name}}": "Verwenden des Image-Repositorys {{.name}}", "Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "", - "Using the running {{.driver_name}} \"{{.profile_name}}\" VM ...": "", "Using the {{.driver}} driver based on existing profile": "", "Using the {{.driver}} driver based on user configuration": "", "VM driver is one of: %v": "VM-Treiber ist einer von: %v", "VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "", + "Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "", "Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "", "Verify the IP address of the running cluster in kubeconfig.": "", "Verifying dashboard health ...": "", @@ -545,11 +558,12 @@ "Wait failed": "", "Wait failed: {{.error}}": 
"", "Wait until Kubernetes core services are healthy before exiting": "Warten Sie vor dem Beenden, bis die Kerndienste von Kubernetes fehlerfrei arbeiten", - "Waiting for cluster to come online ...": "", "Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "Als Root für die NFS-Freigaben wird standardmäßig /nfsshares verwendet (nur Hyperkit-Treiber)", "Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)": "", "You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "Sie scheinen einen Proxy zu verwenden, aber Ihre NO_PROXY-Umgebung enthält keine minikube-IP ({{.ip_address}}). Weitere Informationen finden Sie unter {{.documentation_url}}", + "You can also use 'minikube kubectl -- get pods' to invoke a matching version": "", "You can delete them using the following command(s):": "", + "You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "", "You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "Möglicherweise müssen Sie die VM \"{{.name}}\" manuell von Ihrem Hypervisor entfernen", "You may need to stop the Hyper-V Manager and run `minikube delete` again.": "", "You must specify a service name": "", @@ -558,45 +572,43 @@ "Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "", "Your minikube config refers to an unsupported driver. 
Erase ~/.minikube, and try again.": "", "Your minikube vm is not running, try minikube start.": "", + "adding node": "", "addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "", "addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "", "addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "", - "api load": "", "bash completion failed": "", "call with cleanup=true to remove old tunnels": "", - "command runner": "", "config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "", "config view failed": "", - "creating api client": "", "dashboard service is not running: {{.error}}": "", + "deleting node": "", "disable failed": "", "dry-run mode. Validates configuration, but does not mutate system state": "", "dry-run validation complete!": "", "enable failed": "", "error creating clientset": "", - "error creating machine client": "", "error getting primary control plane": "", "error getting ssh port": "", "error parsing the input ip address for mount": "", "error starting tunnel": "", "error stopping tunnel": "", "failed to open browser: {{.error}}": "", - "getting config": "", - "getting primary control plane": "", + "generating join token": "", "if true, will embed the certs in kubeconfig.": "", "if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "", + "initialization failed, will try again: {{.error}}": "", + "joining cluster": "", "kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. 
Run lsof -p\u003cport\u003e to find the process and kill it": "", "kubectl and minikube configuration will be stored in {{.home_folder}}": "Konfiguration von Kubectl und minikube wird in {{.home_folder}} gespeichert", "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "", "kubectl proxy": "", - "loading config": "", + "libmachine failed": "", "logdir set failed": "", - "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.": "", "max time to wait per Kubernetes core services to be healthy.": "", "minikube addons list --output OUTPUT. json, list": "", "minikube is exiting due to an error. If the above message is not useful, open an issue:": "", "minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.": "", - "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "", + "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "", "minikube profile was successfully set to {{.profile_name}}": "", "minikube status --output OUTPUT. json, text": "", "minikube {{.version}} is available! 
Download it: {{.url}}": "", @@ -605,14 +617,16 @@ "mount failed": "", "namespaces to pause": "", "namespaces to unpause": "", + "none driver does not support multi-node clusters": "", "not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "", "pause containers": "", "profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`": "", - "profile {{.name}} is not running.": "", "reload cached images.": "", "reloads images previously added using the 'cache add' subcommand": "", "retrieving node": "", + "saving node": "", "service {{.namespace_name}}/{{.service_name}} has no node port": "", + "setting up certs": "", "stat failed": "", "status json failure": "", "status text failure": "", @@ -637,17 +651,17 @@ "usage: minikube delete": "", "usage: minikube profile [MINIKUBE_PROFILE_NAME]": "", "zsh completion failed": "", + "{{.cluster}} IP has been updated to point at {{.ip}}": "", + "{{.cluster}} IP was already correctly configured for {{.ip}}": "", + "{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "", "{{.driver}} does not appear to be installed": "", "{{.driver}} does not appear to be installed, but is specified by an existing profile. 
Please run 'minikube delete' or install {{.driver}}": "", "{{.extra_option_component_name}}.{{.key}}={{.value}}": "", - "{{.machine}} IP has been updated to point at {{.ip}}": "", - "{{.machine}} IP was already correctly configured for {{.ip}}": "", - "{{.name}} cluster does not exist": "", "{{.name}} has no available configuration options": "", "{{.name}} is already running": "", "{{.name}} was successfully configured": "", "{{.name}}\" profile does not exist": "Profil \"{{.name}}\" existiert nicht", - "{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "", + "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "", "{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.prefix}}minikube {{.version}} auf {{.platform}}", "{{.type}} is not yet a supported filesystem. We will try anyways!": "", "{{.url}} is not accessible: {{.error}}": "" diff --git a/translations/es.json b/translations/es.json index 53f8e3f3f5..ee0814d4a0 100644 --- a/translations/es.json +++ b/translations/es.json @@ -1,16 +1,13 @@ { "\"The '{{.minikube_addon}}' addon is disabled": "", + "\"{{.machineName}}\" does not exist, nothing to stop": "", "\"{{.name}}\" profile does not exist": "El perfil \"{{.name}}\" no existe", "\"{{.name}}\" profile does not exist, trying anyways.": "", - "\"{{.node_name}}\" stopped.": "", - "\"{{.profile_name}}\" does not exist, nothing to stop": "", - "\"{{.profile_name}}\" host does not exist, unable to show an IP": "", "'none' driver does not support 'minikube docker-env' command": "", "'none' driver does not support 'minikube mount' command": "", "'none' driver does not support 'minikube podman-env' command": "", "'none' driver does not support 'minikube ssh' command": "", "'{{.driver}}' driver reported an issue: {{.error}}": "", - "'{{.profile}}' is not running": "", "- 
{{.profile}}": "", "A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "", "A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "", @@ -32,12 +29,11 @@ "Adds a node to the given cluster config, and starts it.": "", "Adds a node to the given cluster.": "", "Advanced Commands:": "", - "After minikube restart the dockerd ports might have changed. To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Aliases": "", "Allow user prompts for more information": "", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "Repositorio de imágenes alternativo del que extraer imágenes de Docker. Puedes usarlo cuando tengas acceso limitado a gcr.io. Si quieres que minikube elija uno por ti, solo tienes que definir el valor como \"auto\". 
Los usuarios de China continental pueden utilizar réplicas locales de gcr.io, como registry.cn-hangzhou.aliyuncs.com/google_containers", "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)": "Cantidad de RAM asignada a la VM de minikube (formato: \u003cnúmero\u003e[\u003cunidad\u003e], donde unidad = b, k, m o g)", - "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", + "Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", "Amount of time to wait for a service in seconds": "", "Amount of time to wait for service in seconds": "", "Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "", @@ -48,6 +44,7 @@ "Because you are using docker driver on Mac, the terminal needs to be open to run it.": "", "Bind Address: {{.Address}}": "", "Block until the apiserver is servicing API requests": "", + "Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "", "Cannot find directory {{.path}} for mount": "", "Cannot use both --output and --format options": "", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "", @@ -66,9 +63,9 @@ "Could not process error from failed deletion": "", "Could not process errors from failed deletion": "", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "Código de país de la réplica de imagen que quieras utilizar. Déjalo en blanco para usar el valor global. 
Los usuarios de China continental deben definirlo como cn.", - "Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", + "Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", "Creating mount {{.name}} ...": "Creando la activación {{.name}}...", - "Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", + "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", "DEPRECATED, use `driver` instead.": "", "Default group id used for the mount": "", "Default user id used for the mount": "", @@ -97,10 +94,9 @@ "Done! kubectl is now configured to use \"{{.name}}\"": "", "Done! kubectl is now configured to use \"{{.name}}__1": "¡Listo! Se ha configurado kubectl para que use \"{{.name}}", "Download complete!": "Se ha completado la descarga", + "Downloading Kubernetes {{.version}} preload ...": "", "Downloading VM boot image ...": "", "Downloading driver {{.driver}}:": "", - "Downloading preloaded images tarball for k8s {{.version}} ...": "", - "Downloading {{.name}} {{.version}}": "", "ERROR creating `registry-creds-acr` secret": "", "ERROR creating `registry-creds-dpr` secret": "", "ERROR creating `registry-creds-ecr` secret: {{.error}}": "", @@ -109,7 +105,6 @@ "Enable addons. 
see `minikube addons list` for a list of valid addon names.": "", "Enable experimental NVIDIA GPU support in minikube": "Permite habilitar la compatibilidad experimental con GPUs NVIDIA en minikube", "Enable host resolver for NAT DNS requests (virtualbox driver only)": "Permite habilitar la resolución del host en las solicitudes DNS con traducción de direcciones de red (NAT) aplicada (solo con el controlador de Virtualbox)", - "Enable istio needs {{.minMem}} MB of memory and {{.minCpus}} CPUs.": "", "Enable proxy for NAT DNS requests (virtualbox driver only)": "Permite habilitar el uso de proxies en las solicitudes de DNS con traducción de direcciones de red (NAT) aplicada (solo con el controlador de Virtualbox)", "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\": "Permite habilitar el complemento CNI predeterminado (/etc/cni/net.d/k8s.conf). Se utiliza junto con \"--network-plugin=cni", "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). 
Used in conjunction with \\\"--network-plugin=cni\\\".": "", @@ -131,45 +126,29 @@ "Error finding port for mount": "", "Error generating set output": "", "Error generating unset output": "", - "Error getting IP": "", - "Error getting client": "", - "Error getting client: {{.error}}": "", - "Error getting cluster": "", "Error getting cluster bootstrapper": "", "Error getting cluster config": "", - "Error getting config": "", - "Error getting control plane": "", "Error getting host": "", - "Error getting host IP": "", - "Error getting host status": "", - "Error getting machine logs": "", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "", "Error getting primary control plane": "", - "Error getting primary cp": "", - "Error getting service status": "", "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "", "Error getting ssh client": "", "Error getting the host IP address to use from within the VM": "", - "Error host driver ip status": "", "Error killing mount process": "", - "Error loading api": "", - "Error loading profile config": "", "Error loading profile config: {{.error}}": "", "Error loading profile {{.name}}: {{.error}}": "No se ha podido cargar el perfil {{.name}}: {{.error}}", "Error opening service": "", "Error parsing Driver version: {{.error}}": "No se ha podido analizar la versión de Driver: {{.error}}", "Error parsing minikube version: {{.error}}": "No se ha podido analizar la versión de minikube: {{.error}}", "Error reading {{.path}}: {{.error}}": "", - "Error retrieving node": "", "Error starting cluster": "", "Error starting mount": "", - "Error starting node": "", "Error while setting kubectl current context : {{.error}}": "", "Error writing mount pid": "", - "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. 
Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}\"": "", "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "Error: Has seleccionado Kubernetes {{.new}}, pero el clúster de tu perfil utiliza la versión {{.old}}. 
No se puede cambiar a una versión inferior sin eliminar todos los datos y recursos pertinentes, pero dispones de las siguientes opciones para continuar con la operación:\n* Volver a crear el clúster con Kubernetes {{.new}}: ejecuta \"minikube delete {{.profile}}\" y, luego, \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Crear un segundo clúster con Kubernetes {{.new}}: ejecuta \"minikube start -p \u003cnuevo nombre\u003e --kubernetes-version={{.new}}\"\n* Reutilizar el clúster actual con Kubernetes {{.old}} o una versión posterior: ejecuta \"minikube start {{.profile}} --kubernetes-version={{.old}}", "Error: [{{.id}}] {{.error}}": "", "Examples": "", + "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "", "Exiting": "Saliendo", "Exiting.": "", "External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)": "", @@ -177,38 +156,38 @@ "Failed to cache ISO": "", "Failed to cache and load images": "", "Failed to cache binaries": "", + "Failed to cache images": "", "Failed to cache images to tar": "", "Failed to cache kubectl": "", "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}": "No se han podido cambiar los permisos de {{.minikube_dir_path}}: {{.error}}", - "Failed to check if machine exists": "", "Failed to check main repository and mirrors for images for images": "", "Failed to delete cluster: {{.error}}": "No se ha podido eliminar el clúster: {{.error}}", "Failed to delete cluster: {{.error}}__1": "No se ha podido eliminar el clúster: {{.error}}", "Failed to delete images": "", "Failed to delete images from config": "", - "Failed to delete node {{.name}}": "", "Failed to enable container runtime": "", "Failed to generate config": "", + "Failed to get API Server URL": "", "Failed to get bootstrapper": "", "Failed to get command runner": "", - "Failed to get driver URL": "", "Failed to get image map": "", "Failed to get machine client": "", 
"Failed to get service URL: {{.error}}": "", "Failed to kill mount process: {{.error}}": "No se ha podido detener el proceso de activación: {{.error}}", "Failed to list cached images": "", + "Failed to parse kubernetes version": "", "Failed to reload cached images": "", "Failed to save config": "", "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}": "No se ha podido definir la variable de entorno NO_PROXY. Utiliza export NO_PROXY=$NO_PROXY,{{.ip}}", "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "", "Failed to setup certs": "", "Failed to setup kubeconfig": "", - "Failed to start node {{.name}}": "", "Failed to stop node {{.name}}": "", "Failed to update cluster": "", "Failed to update config": "", "Failed unmount: {{.error}}": "", "File permissions used for the mount": "", + "Filter to use only VM Drivers": "", "Flags": "", "Follow": "", "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "Para disfrutar de un funcionamiento óptimo, instala kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/", @@ -218,13 +197,16 @@ "Force minikube to perform possibly dangerous operations": "Permite forzar minikube para que realice operaciones potencialmente peligrosas", "Found network options:": "Se han encontrado las siguientes opciones de red:", "Found {{.number}} invalid profile(s) !": "", + "Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "", + "Generate unable to parse memory '{{.memory}}': {{.error}}": "", "Gets the kubernetes URL(s) for the specified service in your local cluster": "", "Gets the kubernetes URL(s) for the specified service in your local cluster. 
In the case of multiple URLs they will be printed one at a time.": "", "Gets the logs of the running instance, used for debugging minikube, not user code.": "", "Gets the status of a local kubernetes cluster": "", "Gets the status of a local kubernetes cluster.\n\tExit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left.\n\tEg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK)": "", "Gets the value of PROPERTY_NAME from the minikube config file": "", - "Getting machine config failed": "", + "Getting bootstrapper": "", + "Getting primary control plane": "", "Global Flags": "", "Go template format string for the cache list output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate": "", "Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "", @@ -235,6 +217,7 @@ "Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "", + "If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "", "If set, install addons. 
Defaults to true.": "", "If set, pause all namespaces": "", "If set, unpause all namespaces": "", @@ -251,8 +234,9 @@ "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.": "Registros de Docker que no son seguros y que se transferirán al daemon de Docker. Se añadirá automáticamente el intervalo CIDR de servicio predeterminado.", "Install VirtualBox, or select an alternative value for --driver": "", "Install the latest hyperkit binary, and run 'minikube delete'": "", - "Invalid size passed in argument: {{.error}}": "", "IsEnabled failed": "", + "Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "", + "Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "", "Kill the mount process spawned by minikube start": "", "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}": "", "Kubernetes {{.version}} is not supported by this release of minikube": "", @@ -267,7 +251,7 @@ "Local folders to share with Guest via NFS mounts (hyperkit driver only)": "Carpetas locales que se compartirán con el invitado mediante activaciones de NFS (solo con el controlador de hyperkit)", "Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "Ubicación del socket de VPNKit que se utiliza para ofrecer funciones de red. Si se deja en blanco, se inhabilita VPNKitSock de Hyperkit; si se define como \"auto\", se utiliza Docker para las conexiones de VPNKit en Mac. 
Con cualquier otro valor, se utiliza el VSock especificado (solo con el controlador de hyperkit)", "Location of the minikube iso": "Ubicación de la ISO de minikube", - "Location of the minikube iso.": "", + "Locations to fetch the minikube ISO from.": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "", "Message Size: {{.size}}": "", @@ -285,15 +269,18 @@ "NOTE: This process must stay alive for the mount to be accessible ...": "", "Networking and Connectivity Commands:": "", "No minikube profile was found. You can create one using `minikube start`.": "", + "Node \"{{.node_name}}\" stopped.": "", "Node may be unable to resolve external DNS records": "", "Node operations": "", "Node {{.name}} was successfully deleted.": "", + "Node {{.nodeName}} does not exist.": "", + "Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "", "None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "No se puede acceder a ninguno de los repositorios conocidos de tu ubicación. Se utilizará {{.image_repository_name}} como alternativa.", "None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag": "No se puede acceder a ninguno de los repositorios conocidos. 
Plantéate indicar un repositorio de imágenes alternativo con la marca --image-repository.", "Not passing {{.name}}={{.value}} to docker env.": "", - "Noticed that you are using minikube docker-env:": "", + "Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "", + "Number of CPUs allocated to Kubernetes.": "", "Number of CPUs allocated to the minikube VM": "Número de CPU asignadas a la VM de minikube", - "Number of CPUs allocated to the minikube VM.": "", "Number of lines back to go within the log": "", "OS release is {{.pretty_name}}": "", "Open the addons URL with https instead of http": "", @@ -314,6 +301,7 @@ "Please install the minikube hyperkit VM driver, or select an alternative --driver": "", "Please install the minikube kvm2 VM driver, or select an alternative --driver": "", "Please make sure the service you are looking for is deployed or is in the correct namespace.": "", + "Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "", "Please upgrade the '{{.driver_executable}}'. {{.documentation_url}}": "Actualiza \"{{.driver_executable}}\". {{.documentation_url}}", "Populates the specified folder with documentation in markdown about minikube": "", @@ -327,10 +315,10 @@ "Profile gets or sets the current minikube profile": "", "Profile name \"{{.profilename}}\" is minikube keyword. 
To delete profile use command minikube delete -p \u003cprofile name\u003e": "", "Provide VM UUID to restore MAC address (hyperkit driver only)": "Permite especificar un UUID de VM para restaurar la dirección MAC (solo con el controlador de hyperkit)", + "Pulling base image ...": "", "Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "", "Rebuild libvirt with virt-network support": "", "Received {{.name}} signal": "", - "Reconfiguring existing host ...": "", "Registry mirrors to pass to the Docker daemon": "Réplicas del registro que se transferirán al daemon de Docker", "Reinstall VirtualBox and reboot. Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/": "", "Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "", @@ -341,7 +329,10 @@ "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "", "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "El tamaño de disco de {{.requested_size}} que se ha solicitado es inferior al tamaño mínimo de {{.minimum_size}}", "Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "El valor de la asignación de memoria ({{.memory}} MB) solicitada es inferior a la asignación de memoria predeterminada de {{.default_memorysize}} MB. minikube podría no funcionar correctamente o fallar de manera inesperada.", + "Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. 
Kubernetes may crash unexpectedly.": "", "Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "El valor de la asignación de memoria de {{.requested_size}} solicitada es inferior al valor mínimo de {{.minimum_size}}", + "Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "", + "Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "", "Retrieve the ssh identity key path of the specified cluster": "", "Retrieve the ssh identity key path of the specified cluster.": "", "Retrieves the IP address of the running cluster": "", @@ -354,8 +345,10 @@ "Run minikube from the C: drive.": "", "Run the kubernetes client, download it if necessary. Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "", "Run the minikube command as an Administrator": "", + "Run: \"{{.delete}}\", then \"{{.start}} --alsologtostderr -v=1\" to try again with more logging": "", "Run: 'chmod 600 $HOME/.kube/config'": "", "Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", + "Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. 
Or list out all the services using 'minikube service list'": "", "Set failed": "", "Set flag to delete all profiles": "", "Set this flag to delete the '.minikube' folder from your user directory.": "", @@ -381,8 +374,8 @@ "Specify the 9p version that the mount should use": "", "Specify the ip that the mount should be setup on": "", "Specify the mount filesystem type (supported types: 9p)": "", - "Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "", - "Starting node": "", + "StartHost failed again: {{.error}}": "", + "StartHost failed, but will try again: {{.error}}": "", "Starting tunnel for service {{.service}}.": "", "Starts a local kubernetes cluster": "Inicia un clúster de Kubernetes local", "Starts a node.": "", @@ -395,7 +388,6 @@ "Successfully added {{.name}} to {{.cluster}}!": "", "Successfully deleted all profiles": "", "Successfully mounted {{.sourcePath}} to {{.destinationPath}}": "", - "Successfully powered off Hyper-V. minikube driver -- {{.driver}}": "", "Successfully purged minikube directory located at - [{{.minikubeDirectory}}]": "", "Suggestion: {{.advice}}": "", "Suggestion: {{.fix}}": "", @@ -427,12 +419,16 @@ "The cluster dns domain name used in the kubernetes cluster": "El nombre de dominio de DNS del clúster de Kubernetes", "The container runtime to be used (docker, crio, containerd)": "El entorno de ejecución del contenedor (Docker, cri-o, containerd)", "The container runtime to be used (docker, crio, containerd).": "", + "The control plane for \"{{.name}}\" is paused!": "", + "The control plane node \"{{.name}}\" does not exist.": "", + "The control plane node is not running (state={{.state}})": "", + "The control plane node must be running for this command": "", "The cri socket path to be used": "La ruta del socket de cri", "The cri socket path to be used.": "", - "The docker service within '{{.profile}}' is not active": "", + "The docker service within '{{.name}}' is not active": "", + "The docker-env command is only 
compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "", "The driver '{{.driver}}' is not supported on {{.os}}": "El controlador \"{{.driver}}\" no se puede utilizar en {{.os}}", - "The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "", - "The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the \"{{.driver}}\" driver.": "", + "The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "", "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "El nombre del conmutador virtual de hyperv. El valor predeterminado será el primer nombre que se encuentre (solo con el controlador de hyperv).", "The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "", "The initial time interval for each check that wait performs in seconds": "", @@ -444,10 +440,14 @@ "The name of the node to delete": "", "The name of the node to start": "", "The node to get logs from. Defaults to the primary control plane.": "", + "The node to ssh into. Defaults to the primary control plane.": "", + "The none driver is not compatible with multi-node clusters.": "", + "The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "", "The number of bytes to use for 9p packet payload": "", + "The number of nodes to spin up. Defaults to 1.": "", "The output format. 
One of 'json', 'table'": "", "The path on the file system where the docs in markdown need to be saved": "", - "The podman service within '{{.profile}}' is not active": "", + "The podman service within '{{.cluster}}' is not active": "", "The service namespace": "", "The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "", "The services namespace": "", @@ -456,19 +456,24 @@ "The value passed to --format is invalid: {{.error}}": "", "The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "", "The {{.driver_name}} driver should not be used with root privileges.": "El controlador {{.driver_name}} no se debe utilizar con privilegios de raíz.", + "There is no local cluster named \"{{.cluster}}\"": "", "There's a new version for '{{.driver_executable}}'. Please consider upgrading. {{.documentation_url}}": "Hay una nueva versión de \"{{.driver_executable}}\". Te recomendamos que realices la actualización. {{.documentation_url}}", "These changes will take effect upon a minikube delete and then a minikube start": "", "This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "", "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "El proceso se puede automatizar si se define la variable de entorno CHANGE_MINIKUBE_NONE_USER=true", + "This control plane is not running! 
(state={{.state}})": "", + "This is unusual - you may want to investigate using \"{{.command}}\"": "", "This will keep the existing kubectl context and will create a minikube context.": "Se conservará el contexto de kubectl actual y se creará uno de minikube.", "This will start the mount daemon and automatically mount files into minikube": "Se iniciará el daemon de activación y se activarán automáticamente los archivos en minikube", "This will start the mount daemon and automatically mount files into minikube.": "", + "Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "", "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "Para eliminar este clúster de raíz, ejecuta: sudo {{.cmd}} delete", "To connect to this cluster, use: kubectl --context={{.name}}": "Para conectarte a este clúster, usa: kubectl --context={{.name}}", "To connect to this cluster, use: kubectl --context={{.name}}__1": "Para conectarte a este clúster, usa: kubectl --context={{.name}}", "To connect to this cluster, use: kubectl --context={{.profile_name}}": "", "To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "", - "To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "", + "To fix this, run: {{.command}}": "", + "To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "", "To see addons list for other profiles use: `minikube addons -p name list`": "", "To start minikube with HyperV Powershell must be in your PATH`": "", "To use kubectl or minikube commands as your own user, you may need to relocate them. 
For example, to overwrite your own settings, run:": "Para usar comandos de kubectl o minikube como tu propio usuario, puede que debas reubicarlos. Por ejemplo, para sobrescribir tu configuración, ejecuta:", @@ -478,24 +483,31 @@ "Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "Unable to enable dashboard": "", "Unable to fetch latest version info": "", + "Unable to find control plane": "", "Unable to generate docs": "", "Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", "Unable to get VM IP address": "", "Unable to get addon status for {{.name}}: {{.error}}": "", "Unable to get bootstrapper: {{.error}}": "No se ha podido obtener el programa previo: {{.error}}", + "Unable to get command runner": "", + "Unable to get control plane status: {{.error}}": "", "Unable to get current user": "", + "Unable to get driver IP": "", + "Unable to get machine status": "", "Unable to get runtime": "", - "Unable to get the status of the {{.name}} cluster.": "", "Unable to kill mount process: {{.error}}": "", "Unable to load cached images from config file.": "No se han podido cargar las imágenes almacenadas en caché del archivo de configuración.", "Unable to load cached images: {{.error}}": "", "Unable to load config: {{.error}}": "No se ha podido cargar la configuración: {{.error}}", + "Unable to load host": "", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "No se ha podido analizar la versión \"{{.kubernetes_version}}\": {{.error}}", "Unable to parse default Kubernetes version from constants: {{.error}}": "", + "Unable to parse memory '{{.memory}}': {{.error}}": "", "Unable to parse oldest Kubernetes version from constants: {{.error}}": "", "Unable to pull images, which may be OK: {{.error}}": "No se ha podido recuperar imágenes, que podrían estar en buen estado: {{.error}}", - "Unable to remove 
machine directory: %v": "", - "Unable to start VM. Please investigate and run 'minikube delete' if possible": "", + "Unable to remove machine directory": "", + "Unable to restart cluster, will reset it: {{.error}}": "", + "Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "", "Unable to stop VM": "", "Unable to update {{.driver}} driver: {{.error}}": "", "Unable to verify SSH connectivity: {{.error}}. Will retry...": "", @@ -506,6 +518,8 @@ "Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "", "Unset variables instead of setting them": "", "Update server returned an empty list": "", + "Updating node": "", + "Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "", "Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "", "Upgrading from Kubernetes {{.old}} to {{.new}}": "Actualizando la versión de Kubernetes de {{.old}} a {{.new}}", "Usage": "", @@ -526,11 +540,11 @@ "Userspace file server:": "", "Using image repository {{.name}}": "Utilizando el repositorio de imágenes {{.name}}", "Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "", - "Using the running {{.driver_name}} \"{{.profile_name}}\" VM ...": "", "Using the {{.driver}} driver based on existing profile": "", "Using the {{.driver}} driver based on user configuration": "", "VM driver is one of: %v": "El controlador de la VM es uno de los siguientes: %v", "VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "", + "Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "", "Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "", "Verify the IP address of the running cluster in kubeconfig.": "", "Verifying dashboard health ...": "", @@ -545,11 +559,12 @@ "Wait failed": "", "Wait failed: 
{{.error}}": "", "Wait until Kubernetes core services are healthy before exiting": "Espera hasta que los servicios principales de Kubernetes se encuentren en buen estado antes de salir", - "Waiting for cluster to come online ...": "", "Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "Ruta en la raíz de los recursos compartidos de NFS. Su valor predeterminado es /nfsshares (solo con el controlador de hyperkit)", "Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)": "", "You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "Parece que estás usando un proxy, pero tu entorno NO_PROXY no incluye la dirección IP de minikube ({{.ip_address}}). Consulta {{.documentation_url}} para obtener más información", + "You can also use 'minikube kubectl -- get pods' to invoke a matching version": "", "You can delete them using the following command(s):": "", + "You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "", "You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "Puede que tengas que retirar manualmente la VM \"{{.name}}\" de tu hipervisor", "You may need to stop the Hyper-V Manager and run `minikube delete` again.": "", "You must specify a service name": "", @@ -558,45 +573,43 @@ "Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "", "Your minikube config refers to an unsupported driver. 
Erase ~/.minikube, and try again.": "", "Your minikube vm is not running, try minikube start.": "", + "adding node": "", "addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "", "addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "", "addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "", - "api load": "", "bash completion failed": "", "call with cleanup=true to remove old tunnels": "", - "command runner": "", "config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "", "config view failed": "", - "creating api client": "", "dashboard service is not running: {{.error}}": "", + "deleting node": "", "disable failed": "", "dry-run mode. Validates configuration, but does not mutate system state": "", "dry-run validation complete!": "", "enable failed": "", "error creating clientset": "", - "error creating machine client": "", "error getting primary control plane": "", "error getting ssh port": "", "error parsing the input ip address for mount": "", "error starting tunnel": "", "error stopping tunnel": "", "failed to open browser: {{.error}}": "", - "getting config": "", - "getting primary control plane": "", + "generating join token": "", "if true, will embed the certs in kubeconfig.": "", "if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "", + "initialization failed, will try again: {{.error}}": "", + "joining cluster": "", "kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. 
Run lsof -p\u003cport\u003e to find the process and kill it": "", "kubectl and minikube configuration will be stored in {{.home_folder}}": "La configuración de kubectl y de minikube se almacenará en {{.home_folder}}", "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "", "kubectl proxy": "", - "loading config": "", + "libmachine failed": "", "logdir set failed": "", - "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.": "", "max time to wait per Kubernetes core services to be healthy.": "", "minikube addons list --output OUTPUT. json, list": "", "minikube is exiting due to an error. If the above message is not useful, open an issue:": "", "minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.": "", - "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "", + "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "", "minikube profile was successfully set to {{.profile_name}}": "", "minikube status --output OUTPUT. json, text": "", "minikube {{.version}} is available! 
Download it: {{.url}}": "", @@ -605,14 +618,16 @@ "mount failed": "", "namespaces to pause": "", "namespaces to unpause": "", + "none driver does not support multi-node clusters": "", "not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "", "pause containers": "", "profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`": "", - "profile {{.name}} is not running.": "", "reload cached images.": "", "reloads images previously added using the 'cache add' subcommand": "", "retrieving node": "", + "saving node": "", "service {{.namespace_name}}/{{.service_name}} has no node port": "", + "setting up certs": "", "stat failed": "", "status json failure": "", "status text failure": "", @@ -637,16 +652,16 @@ "usage: minikube delete": "", "usage: minikube profile [MINIKUBE_PROFILE_NAME]": "", "zsh completion failed": "", + "{{.cluster}} IP has been updated to point at {{.ip}}": "", + "{{.cluster}} IP was already correctly configured for {{.ip}}": "", + "{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "", "{{.driver}} does not appear to be installed": "", "{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}": "", "{{.extra_option_component_name}}.{{.key}}={{.value}}": "", - "{{.machine}} IP has been updated to point at {{.ip}}": "", - "{{.machine}} IP was already correctly configured for {{.ip}}": "", - "{{.name}} cluster does not exist": "", "{{.name}} has no available configuration options": "", "{{.name}} is already running": "", "{{.name}} was successfully configured": "", - "{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. 
You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "", + "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "", "{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.prefix}}minikube {{.version}} en {{.platform}}", "{{.type}} is not yet a supported filesystem. We will try anyways!": "", "{{.url}} is not accessible: {{.error}}": "" diff --git a/translations/fr.json b/translations/fr.json index 7dd081da10..3148fd8a0a 100644 --- a/translations/fr.json +++ b/translations/fr.json @@ -1,17 +1,12 @@ { "\"The '{{.minikube_addon}}' addon is disabled": "", - "\"{{.name}}\" profile does not exist": "Le profil \"{{.name}}\" n'existe pas.", - "\"{{.name}}\" profile does not exist, trying anyways.": "", - "\"{{.node_name}}\" stopped.": "", - "\"{{.profile_name}}\" does not exist, nothing to stop": "", - "\"{{.profile_name}}\" host does not exist, unable to show an IP": "", - "\"{{.profile_name}}\" stopped.": "\"{{.profile_name}}\" est arrêté.", + "\"{{.machineName}}\" does not exist, nothing to stop": "\"{{.machineName}}\" n'existe pas, rien à arrêter.", + "\"{{.name}}\" profile does not exist, trying anyways.": "Le profil \"{{.name}}\" n'existe pas, tentative de suppression quand même.", "'none' driver does not support 'minikube docker-env' command": "", "'none' driver does not support 'minikube mount' command": "", "'none' driver does not support 'minikube podman-env' command": "", "'none' driver does not support 'minikube ssh' command": "", "'{{.driver}}' driver reported an issue: {{.error}}": "", - "'{{.profile}}' is not running": "", "- {{.profile}}": "", "A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "", "A firewall is blocking Docker the minikube VM from reaching the internet.
You may need to configure it to use a proxy.": "", @@ -33,22 +28,22 @@ "Adds a node to the given cluster config, and starts it.": "", "Adds a node to the given cluster.": "", "Advanced Commands:": "", - "After minikube restart the dockerd ports might have changed. To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Aliases": "", "Allow user prompts for more information": "", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "Autre dépôt d'images d'où extraire des images Docker. Il peut être utilisé en cas d'accès limité à gcr.io. Définissez-le sur \\\"auto\\\" pour permettre à minikube de choisir la valeur à votre place. Pour les utilisateurs situés en Chine continentale, vous pouvez utiliser des miroirs gcr.io locaux tels que registry.cn-hangzhou.aliyuncs.com/google_containers.", "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)": "Quantité de mémoire RAM allouée à la VM minikube (format : \u003cnombre\u003e[\u003cunité\u003e], où \"unité\" = b, k, m ou g).", - "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", + "Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", "Amount of time to wait for a service in seconds": "", "Amount of time to wait for service in seconds": "", "Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "", - "Automatically selected the {{.driver}} driver": "", - "Automatically selected the {{.driver}} driver. 
Other choices: {{.alternates}}": "Choix automatique du driver {{.driver}}. Autres choix: {{.alternates}}", "Available Commands": "", "Basic Commands:": "", "Because you are using docker driver on Mac, the terminal needs to be open to run it.": "", "Bind Address: {{.Address}}": "", "Block until the apiserver is servicing API requests": "", + "Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "", "Cannot find directory {{.path}} for mount": "", "Cannot use both --output and --format options": "", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "", @@ -68,16 +63,15 @@ "Could not process error from failed deletion": "", "Could not process errors from failed deletion": "", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "Code pays du miroir d'images à utiliser. Laissez ce paramètre vide pour utiliser le miroir international.
Pour les utilisateurs situés en Chine continentale, définissez sa valeur sur \"cn\".", - "Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", + "Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", "Creating mount {{.name}} ...": "Création de l'installation {{.name}}…", - "Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "Création d'une VM {{.driver_name}} (CPUs={{.number_of_cpus}}, Mémoire={{.memory_size}}MB, Disque={{.disk_size}}MB)...", + "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "Création de {{.machine_type}} {{.driver_name}} (CPUs={{.number_of_cpus}}, Mémoire={{.memory_size}}MB, Disque={{.disk_size}}MB)...", "DEPRECATED, use `driver` instead.": "", "Default group id used for the mount": "", "Default user id used for the mount": "", "Delete an image from the local cache.": "", "Deletes a local kubernetes cluster": "", - "Deletes a local kubernetes cluster. This command deletes the VM, and removes all\nassociated files.": "", - "Deletes a local kubernetes cluster. This command deletes the VM, and removes all associated files.": "Supprime le cluster Kubernetes local. Cette commande supprime la VM ainsi que tous les fichiers associés.", + "Deletes a local kubernetes cluster. This command deletes the VM, and removes all\nassociated files.": "Supprime le cluster Kubernetes local. 
Cette commande supprime la VM ainsi que tous les fichiers associés.", "Deletes a node from a cluster.": "", "Deleting \"{{.profile_name}}\" in {{.driver_name}} ...": "Suppression de \"{{.profile_name}}\" dans {{.driver_name}}...", "Deleting node {{.name}} from cluster {{.cluster}}": "Suppression de noeuds {{.name}} de cluster {{.cluster}}", @@ -90,18 +84,15 @@ "Display dashboard URL instead of opening a browser": "", "Display the kubernetes addons URL in the CLI instead of opening it in the default browser": "", "Display the kubernetes service URL in the CLI instead of opening it in the default browser": "", - "Display values currently set in the minikube config file": "", "Display values currently set in the minikube config file.": "", "Docker inside the VM is unavailable. Try running 'minikube delete' to reset the VM.": "", "Docs have been saved at - {{.path}}": "", "Documentation: {{.url}}": "", - "Done! kubectl is now configured to use \"{{.name}}": "Terminé ! kubectl est maintenant configuré pour utiliser \"{{.name}}\".", - "Done! kubectl is now configured to use \"{{.name}}\"": "", + "Done! kubectl is now configured to use \"{{.name}}\"": "Terminé ! kubectl est maintenant configuré pour utiliser \"{{.name}}\".", "Download complete!": "Téléchargement terminé !", + "Downloading Kubernetes {{.version}} preload ...": "", "Downloading VM boot image ...": "", "Downloading driver {{.driver}}:": "", - "Downloading preloaded images tarball for k8s {{.version}} ...": "", - "Downloading {{.name}} {{.version}}": "", "ERROR creating `registry-creds-acr` secret": "", "ERROR creating `registry-creds-dpr` secret": "", "ERROR creating `registry-creds-ecr` secret: {{.error}}": "", @@ -110,13 +101,12 @@ "Enable addons. 
see `minikube addons list` for a list of valid addon names.": "", "Enable experimental NVIDIA GPU support in minikube": "Active l'assistance expérimentale du GPU NVIDIA dans minikube.", "Enable host resolver for NAT DNS requests (virtualbox driver only)": "Active le résolveur d'hôte pour les requêtes DNS NAT (pilote VirtualBox uniquement).", - "Enable istio needs {{.minMem}} MB of memory and {{.minCpus}} CPUs.": "", "Enable proxy for NAT DNS requests (virtualbox driver only)": "Active le proxy pour les requêtes DNS NAT (pilote VirtualBox uniquement).", "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\": "Active le plug-in CNI par défaut (/etc/cni/net.d/k8s.conf). Utilisé en association avec \\\"--network-plugin=cni\\\".", "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\\".": "", "Enables the addon w/ADDON_NAME within minikube (example: minikube addons enable dashboard). For a list of available addons use: minikube addons list": "", "Enabling '{{.name}}' returned an error: {{.error}}": "", - "Enabling addons: {{.addons}}": "", + "Enabling addons: {{.addons}}": "Installation des addons: {{.addons}}", "Enabling dashboard ...": "", "Ensure that CRI-O is installed and healthy: Run 'sudo systemctl start crio' and 'journalctl -u crio'. Alternatively, use --container-runtime=docker": "", "Ensure that Docker is installed and healthy: Run 'sudo systemctl start docker' and 'journalctl -u docker'. 
Alternatively, select another value for --driver": "", @@ -132,45 +122,29 @@ "Error finding port for mount": "", "Error generating set output": "", "Error generating unset output": "", - "Error getting IP": "", - "Error getting client": "", - "Error getting client: {{.error}}": "", - "Error getting cluster": "", "Error getting cluster bootstrapper": "", "Error getting cluster config": "", - "Error getting config": "", - "Error getting control plane": "", "Error getting host": "", - "Error getting host IP": "", - "Error getting host status": "", - "Error getting machine logs": "", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "", "Error getting primary control plane": "", - "Error getting primary cp": "", - "Error getting service status": "", "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "", "Error getting ssh client": "", "Error getting the host IP address to use from within the VM": "", - "Error host driver ip status": "", "Error killing mount process": "", - "Error loading api": "", - "Error loading profile config": "", "Error loading profile config: {{.error}}": "", "Error loading profile {{.name}}: {{.error}}": "Erreur lors du chargement du profil {{.name}} : {{.error}}", "Error opening service": "", "Error parsing Driver version: {{.error}}": "Erreur lors de l'analyse de la version du pilote de la VM : {{.error}}", "Error parsing minikube version: {{.error}}": "Erreur lors de l'analyse de la version de minikube : {{.error}}", "Error reading {{.path}}: {{.error}}": "", - "Error retrieving node": "", "Error starting cluster": "", "Error starting mount": "", - "Error starting node": "", "Error while setting kubectl current context : {{.error}}": "", "Error writing mount pid": "", - "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. 
Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}\"": "", "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "Erreur : Vous avez sélectionné Kubernetes v{{.new}}, mais le cluster existent pour votre profil exécute Kubernetes v{{.old}}. Les rétrogradations non-destructives ne sont pas compatibles. 
Toutefois, vous pouvez poursuivre le processus en réalisant l'une des trois actions suivantes :\n* Créer à nouveau le cluster en utilisant Kubernetes v{{.new}} – exécutez \"minikube delete {{.profile}}\", puis \"minikube start {{.profile}} --kubernetes-version={{.new}}\".\n* Créer un second cluster avec Kubernetes v{{.new}} – exécutez \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\".\n* Réutiliser le cluster existent avec Kubernetes v{{.old}} ou version ultérieure – exécutez \"minikube start {{.profile}} --kubernetes-version={{.old}}\".", "Error: [{{.id}}] {{.error}}": "", "Examples": "", + "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "", "Exiting": "Fermeture…", "Exiting.": "", "External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)": "", @@ -178,38 +152,38 @@ "Failed to cache ISO": "", "Failed to cache and load images": "", "Failed to cache binaries": "", + "Failed to cache images": "", "Failed to cache images to tar": "", "Failed to cache kubectl": "", "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}": "Échec de la modification des autorisations pour {{.minikube_dir_path}} : {{.error}}", - "Failed to check if machine exists": "", "Failed to check main repository and mirrors for images for images": "", "Failed to delete cluster: {{.error}}": "Échec de la suppression du cluster : {{.error}}", "Failed to delete cluster: {{.error}}__1": "Échec de la suppression du cluster : {{.error}}", "Failed to delete images": "", "Failed to delete images from config": "", - "Failed to delete node {{.name}}": "", "Failed to enable container runtime": "", "Failed to generate config": "", + "Failed to get API Server URL": "", "Failed to get bootstrapper": "", "Failed to get command runner": "", - "Failed to get driver URL": "", "Failed to get image map": "", "Failed to get machine client": "", "Failed to get service URL: {{.error}}": "", 
"Failed to kill mount process: {{.error}}": "Échec de l'arrêt du processus d'installation : {{.error}}", "Failed to list cached images": "", + "Failed to parse kubernetes version": "", "Failed to reload cached images": "", "Failed to save config": "", "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}": "Échec de la définition de NO_PROXY Env. Veuillez utiliser `export NO_PROXY=$NO_PROXY,{{.ip}}.", "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "", "Failed to setup certs": "", "Failed to setup kubeconfig": "", - "Failed to start node {{.name}}": "", "Failed to stop node {{.name}}": "", "Failed to update cluster": "", "Failed to update config": "", "Failed unmount: {{.error}}": "", "File permissions used for the mount": "", + "Filter to use only VM Drivers": "", "Flags": "", "Follow": "", "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "Pour des résultats optimaux, installez kubectl à l'adresse suivante : https://kubernetes.io/docs/tasks/tools/install-kubectl/", @@ -219,13 +193,16 @@ "Force minikube to perform possibly dangerous operations": "Oblige minikube à réaliser des opérations possiblement dangereuses.", "Found network options:": "Options de réseau trouvées :", "Found {{.number}} invalid profile(s) !": "", + "Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "", + "Generate unable to parse memory '{{.memory}}': {{.error}}": "", "Gets the kubernetes URL(s) for the specified service in your local cluster": "", "Gets the kubernetes URL(s) for the specified service in your local cluster. 
In the case of multiple URLs they will be printed one at a time.": "", "Gets the logs of the running instance, used for debugging minikube, not user code.": "", "Gets the status of a local kubernetes cluster": "", "Gets the status of a local kubernetes cluster.\n\tExit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left.\n\tEg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK)": "", "Gets the value of PROPERTY_NAME from the minikube config file": "", - "Getting machine config failed": "", + "Getting bootstrapper": "", + "Getting primary control plane": "", "Global Flags": "", "Go template format string for the cache list output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate": "", "Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "", @@ -236,6 +213,7 @@ "Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "", + "If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "", "If set, install addons. 
Defaults to true.": "", "If set, pause all namespaces": "", "If set, unpause all namespaces": "", @@ -252,8 +230,9 @@ "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.": "Registres Docker non sécurisés à transmettre au daemon Docker. La plage CIDR par défaut du service sera ajoutée automatiquement.", "Install VirtualBox, or select an alternative value for --driver": "", "Install the latest hyperkit binary, and run 'minikube delete'": "", - "Invalid size passed in argument: {{.error}}": "", "IsEnabled failed": "", + "Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "", + "Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "", "Kill the mount process spawned by minikube start": "", "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}": "", "Kubernetes {{.version}} is not supported by this release of minikube": "", @@ -268,7 +247,7 @@ "Local folders to share with Guest via NFS mounts (hyperkit driver only)": "Dossiers locaux à partager avec l'invité par des installations NFS (pilote hyperkit uniquement).", "Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "Emplacement du socket VPNKit exploité pour la mise en réseau. Si la valeur est vide, désactive Hyperkit VPNKitSock. Si la valeur affiche \"auto\", utilise la connexion VPNKit de Docker pour Mac. 
Sinon, utilise le VSock spécifié (pilote hyperkit uniquement).", "Location of the minikube iso": "Emplacement de l'ISO minikube.", - "Location of the minikube iso.": "", + "Locations to fetch the minikube ISO from.": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "", "Message Size: {{.size}}": "", @@ -286,15 +265,18 @@ "NOTE: This process must stay alive for the mount to be accessible ...": "", "Networking and Connectivity Commands:": "", "No minikube profile was found. You can create one using `minikube start`.": "", + "Node \"{{.node_name}}\" stopped.": "Le noeud \"{{.node_name}}\" est arrêté.", "Node may be unable to resolve external DNS records": "", "Node operations": "", "Node {{.name}} was successfully deleted.": "", + "Node {{.nodeName}} does not exist.": "", + "Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "", "None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "Aucun dépôt connu dans votre emplacement n'est accessible. {{.image_repository_name}} est utilisé comme dépôt de remplacement.", "None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag": "Aucun dépôt connu n'est accessible. 
Pensez à spécifier un autre dépôt d'images à l'aide de l'indicateur \"--image-repository\".", "Not passing {{.name}}={{.value}} to docker env.": "", - "Noticed that you are using minikube docker-env:": "", + "Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "", + "Number of CPUs allocated to Kubernetes.": "", "Number of CPUs allocated to the minikube VM": "Nombre de processeurs alloués à la VM minikube.", - "Number of CPUs allocated to the minikube VM.": "", "Number of lines back to go within the log": "", "OS release is {{.pretty_name}}": "", "Open the addons URL with https instead of http": "", @@ -315,6 +297,7 @@ "Please install the minikube hyperkit VM driver, or select an alternative --driver": "", "Please install the minikube kvm2 VM driver, or select an alternative --driver": "", "Please make sure the service you are looking for is deployed or is in the correct namespace.": "", + "Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "", "Please upgrade the '{{.driver_executable}}'. {{.documentation_url}}": "Veuillez mettre à niveau l'exécutable \"{{.driver_executable}}\". {{.documentation_url}}", "Populates the specified folder with documentation in markdown about minikube": "", @@ -328,22 +311,25 @@ "Profile gets or sets the current minikube profile": "", "Profile name \"{{.profilename}}\" is minikube keyword. To delete profile use command minikube delete -p \u003cprofile name\u003e": "", "Provide VM UUID to restore MAC address (hyperkit driver only)": "Fournit l'identifiant unique universel (UUID) de la VM pour restaurer l'adresse MAC (pilote hyperkit uniquement).", + "Pulling base image ...": "", "Pulling images ...": "Extraction des images... 
", "Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "", "Rebuild libvirt with virt-network support": "", "Received {{.name}} signal": "", - "Reconfiguring existing host ...": "", "Registry mirrors to pass to the Docker daemon": "Miroirs de dépôt à transmettre au daemon Docker.", "Reinstall VirtualBox and reboot. Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/": "", "Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "", "Related issues:": "", "Relaunching Kubernetes using {{.bootstrapper}} ...": "Redémarrage de Kubernetes à l'aide de {{.bootstrapper}}…", - "Removed all traces of the \"{{.name}}\" cluster.": "", + "Removed all traces of the \"{{.name}}\" cluster.": "Le cluster \"{{.name}}\" a été supprimé.", "Removing {{.directory}} ...": "Suppression du répertoire {{.directory}}…", "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "", "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "La taille de disque demandée ({{.requested_size}}) est inférieure à la taille minimale ({{.minimum_size}}).", "Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "L'allocation de mémoire demandée ({{.memory}} Mo) est inférieure à l'allocation de mémoire par défaut ({{.default_memorysize}} Mo). Sachez que minikube pourrait ne pas fonctionner correctement ou planter de manière inattendue.", + "Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. 
Kubernetes may crash unexpectedly.": "", "Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "L'allocation de mémoire demandée ({{.requested_size}}) est inférieure au minimum autorisé ({{.minimum_size}}).", + "Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "", + "Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "", "Retrieve the ssh identity key path of the specified cluster": "", "Retrieve the ssh identity key path of the specified cluster.": "", "Retrieves the IP address of the running cluster": "", @@ -356,8 +342,10 @@ "Run minikube from the C: drive.": "", "Run the kubernetes client, download it if necessary. Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "", "Run the minikube command as an Administrator": "", + "Run: \"{{.delete}}\", then \"{{.start}} --alsologtostderr -v=1\" to try again with more logging": "", "Run: 'chmod 600 $HOME/.kube/config'": "", "Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", + "Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. 
Or list out all the services using 'minikube service list'": "", "Set failed": "", "Set flag to delete all profiles": "", "Set this flag to delete the '.minikube' folder from your user directory.": "", @@ -383,8 +371,8 @@ "Specify the 9p version that the mount should use": "", "Specify the ip that the mount should be setup on": "", "Specify the mount filesystem type (supported types: 9p)": "", - "Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "", - "Starting node": "", + "StartHost failed again: {{.error}}": "", + "StartHost failed, but will try again: {{.error}}": "", "Starting tunnel for service {{.service}}.": "", "Starts a local kubernetes cluster": "Démarre un cluster Kubernetes local.", "Starts a node.": "", @@ -397,7 +385,6 @@ "Successfully added {{.name}} to {{.cluster}}!": "", "Successfully deleted all profiles": "", "Successfully mounted {{.sourcePath}} to {{.destinationPath}}": "", - "Successfully powered off Hyper-V. minikube driver -- {{.driver}}": "", "Successfully purged minikube directory located at - [{{.minikubeDirectory}}]": "", "Suggestion: {{.advice}}": "", "Suggestion: {{.fix}}": "", @@ -405,7 +392,6 @@ "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --driver={{.driver_name}}'.": "", "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --vm-driver={{.driver_name}}": "Le pilote \"{{.driver_name}}\" nécessite de disposer de droits racine. 
Veuillez exécuter minikube à l'aide de \"sudo minikube --vm-driver={{.driver_name}}\".", "The \"{{.driver_name}}\" driver should not be used with root privileges.": "", - "The \"{{.name}}\" cluster has been deleted.": "Le cluster \"{{.name}}\" a été supprimé.", "The 'none' driver provides limited isolation and may reduce system security and reliability.": "L'isolation fournie par le pilote \"none\" (aucun) est limitée, ce qui peut diminuer la sécurité et la fiabilité du système.", "The '{{.addonName}}' addon is enabled": "", "The '{{.driver}}' driver requires elevated permissions. The following commands will be executed:\\n\\n{{ .example }}\\n": "", @@ -428,12 +414,16 @@ "The cluster dns domain name used in the kubernetes cluster": "Nom du domaine DNS du cluster utilisé dans le cluster Kubernetes.", "The container runtime to be used (docker, crio, containerd)": "environment d'exécution du conteneur à utiliser (docker, crio, containerd).", "The container runtime to be used (docker, crio, containerd).": "", + "The control plane for \"{{.name}}\" is paused!": "", + "The control plane node \"{{.name}}\" does not exist.": "", + "The control plane node is not running (state={{.state}})": "", + "The control plane node must be running for this command": "", "The cri socket path to be used": "Chemin d'accès au socket CRI à utiliser.", "The cri socket path to be used.": "", - "The docker service within '{{.profile}}' is not active": "", + "The docker service within '{{.name}}' is not active": "", + "The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "", "The driver '{{.driver}}' is not supported on {{.os}}": "Le pilote \"{{.driver}}\" n'est pas compatible avec {{.os}}.", - "The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "", - "The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the 
\"{{.driver}}\" driver.": "", + "The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "", "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "Nom du commutateur virtuel hyperv. La valeur par défaut affiche le premier commutateur trouvé (pilote hyperv uniquement).", "The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "", "The initial time interval for each check that wait performs in seconds": "", @@ -445,10 +435,14 @@ "The name of the node to delete": "", "The name of the node to start": "", "The node to get logs from. Defaults to the primary control plane.": "", + "The node to ssh into. Defaults to the primary control plane.": "", + "The none driver is not compatible with multi-node clusters.": "", + "The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "", "The number of bytes to use for 9p packet payload": "", + "The number of nodes to spin up. Defaults to 1.": "", "The output format. 
One of 'json', 'table'": "", "The path on the file system where the docs in markdown need to be saved": "", - "The podman service within '{{.profile}}' is not active": "", + "The podman service within '{{.cluster}}' is not active": "", "The service namespace": "", "The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "", "The services namespace": "", @@ -457,19 +451,24 @@ "The value passed to --format is invalid: {{.error}}": "", "The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "", "The {{.driver_name}} driver should not be used with root privileges.": "Le pilote {{.driver_name}} ne doit pas être utilisé avec des droits racine.", + "There is no local cluster named \"{{.cluster}}\"": "", "There's a new version for '{{.driver_executable}}'. Please consider upgrading. {{.documentation_url}}": "Une nouvelle version de \"{{.driver_executable}}\" est disponible. Pensez à effectuer la mise à niveau. {{.documentation_url}}", "These changes will take effect upon a minikube delete and then a minikube start": "", "This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "", "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "Cette opération peut également être réalisée en définissant la variable d'environment \"CHANGE_MINIKUBE_NONE_USER=true\".", + "This control plane is not running! 
(state={{.state}})": "", + "This is unusual - you may want to investigate using \"{{.command}}\"": "", "This will keep the existing kubectl context and will create a minikube context.": "Cela permet de conserver le contexte kubectl existent et de créer un contexte minikube.", "This will start the mount daemon and automatically mount files into minikube": "Cela permet de lancer le daemon d'installation et d'installer automatiquement les fichiers dans minikube.", "This will start the mount daemon and automatically mount files into minikube.": "", + "Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "", "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "Conseil : Pour supprimer ce cluster appartenant à la racine, exécutez la commande \"sudo {{.cmd}} delete\".", "To connect to this cluster, use: kubectl --context={{.name}}": "Pour vous connecter à ce cluster, utilisez la commande \"kubectl --context={{.name}}\".", "To connect to this cluster, use: kubectl --context={{.name}}__1": "Pour vous connecter à ce cluster, utilisez la commande \"kubectl --context={{.name}}\".", "To connect to this cluster, use: kubectl --context={{.profile_name}}": "", "To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "", - "To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "", + "To fix this, run: {{.command}}": "", + "To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "", "To see addons list for other profiles use: `minikube addons -p name list`": "", "To start minikube with HyperV Powershell must be in your PATH`": "", "To use kubectl or minikube commands as your own user, you may need to relocate them. 
For example, to overwrite your own settings, run:": "Pour utiliser les commandes kubectl ou minikube sous votre propre nom d'utilisateur, vous devrez peut-être les déplacer. Par exemple, pour écraser vos propres paramètres, exécutez la commande suivante :", @@ -479,24 +478,31 @@ "Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "Unable to enable dashboard": "", "Unable to fetch latest version info": "", + "Unable to find control plane": "", "Unable to generate docs": "", "Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", "Unable to get VM IP address": "", "Unable to get addon status for {{.name}}: {{.error}}": "", "Unable to get bootstrapper: {{.error}}": "Impossible d'obtenir l'amorceur : {{.error}}", + "Unable to get command runner": "", + "Unable to get control plane status: {{.error}}": "", "Unable to get current user": "", + "Unable to get driver IP": "", + "Unable to get machine status": "", "Unable to get runtime": "", - "Unable to get the status of the {{.name}} cluster.": "", "Unable to kill mount process: {{.error}}": "", "Unable to load cached images from config file.": "Impossible de charger les images mises en cache depuis le fichier de configuration.", "Unable to load cached images: {{.error}}": "", "Unable to load config: {{.error}}": "Impossible de charger la configuration : {{.error}}", + "Unable to load host": "", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "Impossible d'analyser la version \"{{.kubernetes_version}}\" : {{.error}}", "Unable to parse default Kubernetes version from constants: {{.error}}": "", + "Unable to parse memory '{{.memory}}': {{.error}}": "", "Unable to parse oldest Kubernetes version from constants: {{.error}}": "", "Unable to pull images, which may be OK: {{.error}}": "Impossible d'extraire des images, qui sont peut-être au bon format : 
{{.error}}", - "Unable to remove machine directory: %v": "", - "Unable to start VM. Please investigate and run 'minikube delete' if possible": "", + "Unable to remove machine directory": "", + "Unable to restart cluster, will reset it: {{.error}}": "", + "Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "", "Unable to stop VM": "", "Unable to update {{.driver}} driver: {{.error}}": "", "Unable to verify SSH connectivity: {{.error}}. Will retry...": "", @@ -507,6 +513,8 @@ "Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "", "Unset variables instead of setting them": "", "Update server returned an empty list": "", + "Updating node": "", + "Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "", "Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "", "Upgrading from Kubernetes {{.old}} to {{.new}}": "Mise à niveau de Kubernetes de la version {{.old}} à la version {{.new}}…", "Usage": "Usage", @@ -527,11 +535,11 @@ "Userspace file server:": "", "Using image repository {{.name}}": "Utilisation du dépôt d'images {{.name}}…", "Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "", - "Using the running {{.driver_name}} \"{{.profile_name}}\" VM ...": "", "Using the {{.driver}} driver based on existing profile": "", "Using the {{.driver}} driver based on user configuration": "", "VM driver is one of: %v": "Le pilote de la VM appartient à : %v", "VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "", + "Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "", "Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "", "Verify the IP address of the running cluster in kubeconfig.": "", "Verifying dashboard health ...": "", @@ -548,12 +556,13 @@ "Wait 
failed: {{.error}}": "", "Wait until Kubernetes core services are healthy before exiting": "Avant de quitter, veuillez patienter jusqu'à ce que les principaux services Kubernetes soient opérationnels.", "Waiting for SSH access ...": "En attente de l'accès SSH...", - "Waiting for cluster to come online ...": "", "Waiting for:": "En attente de :", "Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "Emplacement permettant d'accéder aux partages NFS en mode root, la valeur par défaut affichant /nfsshares (pilote hyperkit uniquement).", "Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)": "", "You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "Il semble que vous utilisiez un proxy, mais votre environment NO_PROXY n'inclut pas l'adresse IP ({{.ip_address}}) de minikube. Consultez la documentation à l'adresse {{.documentation_url}} pour en savoir plus.", + "You can also use 'minikube kubectl -- get pods' to invoke a matching version": "", "You can delete them using the following command(s):": "", + "You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "", "You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "Vous devrez peut-être supprimer la VM \"{{.name}}\" manuellement de votre hyperviseur.", "You may need to stop the Hyper-V Manager and run `minikube delete` again.": "", "You must specify a service name": "", @@ -562,45 +571,43 @@ "Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "", "Your minikube config refers to an unsupported driver. 
Erase ~/.minikube, and try again.": "", "Your minikube vm is not running, try minikube start.": "", + "adding node": "", "addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "", "addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "", "addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "", - "api load": "", "bash completion failed": "", "call with cleanup=true to remove old tunnels": "", - "command runner": "", "config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "", "config view failed": "", - "creating api client": "", "dashboard service is not running: {{.error}}": "", + "deleting node": "", "disable failed": "", "dry-run mode. Validates configuration, but does not mutate system state": "", "dry-run validation complete!": "", "enable failed": "", "error creating clientset": "", - "error creating machine client": "", "error getting primary control plane": "", "error getting ssh port": "", "error parsing the input ip address for mount": "", "error starting tunnel": "", "error stopping tunnel": "", "failed to open browser: {{.error}}": "", - "getting config": "", - "getting primary control plane": "", + "generating join token": "", "if true, will embed the certs in kubeconfig.": "", "if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "", + "initialization failed, will try again: {{.error}}": "", + "joining cluster": "", "kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. 
Run lsof -p\u003cport\u003e to find the process and kill it": "", "kubectl and minikube configuration will be stored in {{.home_folder}}": "Les configurations kubectl et minikube seront stockées dans le dossier {{.home_folder}}.", "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "", "kubectl proxy": "", - "loading config": "", + "libmachine failed": "", "logdir set failed": "", - "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.": "", "max time to wait per Kubernetes core services to be healthy.": "", "minikube addons list --output OUTPUT. json, list": "", "minikube is exiting due to an error. If the above message is not useful, open an issue:": "", "minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.": "", - "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "", + "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "", "minikube profile was successfully set to {{.profile_name}}": "", "minikube status --output OUTPUT. json, text": "", "minikube {{.version}} is available! 
Download it: {{.url}}": "", @@ -609,14 +616,16 @@ "mount failed": "", "namespaces to pause": "", "namespaces to unpause": "", + "none driver does not support multi-node clusters": "", "not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "", "pause containers": "", "profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`": "", - "profile {{.name}} is not running.": "", "reload cached images.": "", "reloads images previously added using the 'cache add' subcommand": "", "retrieving node": "", + "saving node": "", "service {{.namespace_name}}/{{.service_name}} has no node port": "", + "setting up certs": "", "stat failed": "", "status json failure": "", "status text failure": "", @@ -641,16 +650,16 @@ "usage: minikube delete": "", "usage: minikube profile [MINIKUBE_PROFILE_NAME]": "", "zsh completion failed": "", + "{{.cluster}} IP has been updated to point at {{.ip}}": "", + "{{.cluster}} IP was already correctly configured for {{.ip}}": "", + "{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "", "{{.driver}} does not appear to be installed": "", "{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}": "", "{{.extra_option_component_name}}.{{.key}}={{.value}}": "", - "{{.machine}} IP has been updated to point at {{.ip}}": "", - "{{.machine}} IP was already correctly configured for {{.ip}}": "", - "{{.name}} cluster does not exist": "", "{{.name}} has no available configuration options": "", "{{.name}} is already running": "", "{{.name}} was successfully configured": "", - "{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. 
You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "", + "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "", "{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.prefix}}minikube {{.version}} sur {{.platform}}", "{{.type}} is not yet a supported filesystem. We will try anyways!": "", "{{.url}} is not accessible: {{.error}}": "" diff --git a/translations/ja.json b/translations/ja.json index 9df68fb233..ae49a12ff5 100644 --- a/translations/ja.json +++ b/translations/ja.json @@ -1,12 +1,11 @@ { "\"The '{{.minikube_addon}}' addon is disabled": "", + "\"{{.machineName}}\" does not exist, nothing to stop": "", "\"{{.minikube_addon}}\" was successfully disabled": "「{{.minikube_addon}}」が無効化されました", "\"{{.name}}\" cluster does not exist. Proceeding ahead with cleanup.": "「{{.name}}」というクラスターは存在しません。クリーンアップ処理を続行します。", "\"{{.name}}\" profile does not exist": "「{{.name}}」というプロファイルは存在しません", "\"{{.name}}\" profile does not exist, trying anyways.": "", - "\"{{.node_name}}\" stopped.": "", "\"{{.profile_name}}\" VM does not exist, nothing to stop": "「{{.profile_name}}」というVMは存在しません。停止すべき対象がありません", - "\"{{.profile_name}}\" does not exist, nothing to stop": "", "\"{{.profile_name}}\" host does not exist, unable to show an IP": "「{{.profile_name}}」というホストは存在しません。IPを表示できません", "\"{{.profile_name}}\" stopped.": "「{{.profile_name}}」が停止しました。", "'none' driver does not support 'minikube docker-env' command": "「none」ドライバーは「minikube docker-env」コマンドをサポートしていません", @@ -14,7 +13,6 @@ "'none' driver does not support 'minikube podman-env' command": "", "'none' driver does not support 'minikube ssh' command": "「none」ドライバーは「minikube ssh」コマンドをサポートしていません", "'{{.driver}}' driver reported an issue: {{.error}}": "「{{.driver}}」ドライバーがエラーを報告しました: {{.error}}", - "'{{.profile}}' is not running": "", "- {{.profile}}": "", "A VPN or firewall is interfering with HTTP access to the minikube VM. 
Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "", "A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "", @@ -36,12 +34,11 @@ "Adds a node to the given cluster config, and starts it.": "", "Adds a node to the given cluster.": "", "Advanced Commands:": "", - "After minikube restart the dockerd ports might have changed. To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Aliases": "", "Allow user prompts for more information": "", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "Docker イメージの pull 元の代替イメージ リポジトリ。これは、gcr.io へのアクセスが制限されている場合に使用できます。これを \\\"auto\\\" に設定すると、minikube によって自動的に指定されるようになります。中国本土のユーザーの場合、registry.cn-hangzhou.aliyuncs.com/google_containers などのローカル gcr.io ミラーを使用できます。", "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)": "minikube VM に割り当てられた RAM 容量(形式: \u003cnumber\u003e[\u003cunit\u003e]、unit = b、k、m、g)", - "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", + "Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", "Amount of time to wait for a service in seconds": "", "Amount of time to wait for service in seconds": "", "Another hypervisor, such as VirtualBox, is conflicting with KVM. 
Please stop the other hypervisor, or use --driver to switch to it.": "", @@ -52,6 +49,7 @@ "Because you are using docker driver on Mac, the terminal needs to be open to run it.": "", "Bind Address: {{.Address}}": "", "Block until the apiserver is servicing API requests": "", + "Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "", "Cannot find directory {{.path}} for mount": "", "Cannot use both --output and --format options": "", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "", @@ -70,9 +68,9 @@ "Could not process error from failed deletion": "", "Could not process errors from failed deletion": "", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "使用するイメージミラーの国コード。グローバルのものを使用する場合は空のままにします。中国本土のユーザーの場合は、「cn」に設定します。", - "Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", + "Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", "Creating mount {{.name}} ...": "マウント {{.name}} を作成しています...", - "Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", + "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", "DEPRECATED, use `driver` instead.": "", "Default group id used for the mount": "", "Default user id used for the mount": "", @@ -101,10 +99,9 @@ "Done! 
kubectl is now configured to use \"{{.name}}\"": "", "Done! kubectl is now configured to use \"{{.name}}__1": "完了しました。kubectl が「{{.name}}」を使用するよう構成されました", "Download complete!": "ダウンロードが完了しました。", + "Downloading Kubernetes {{.version}} preload ...": "", "Downloading VM boot image ...": "", "Downloading driver {{.driver}}:": "", - "Downloading preloaded images tarball for k8s {{.version}} ...": "", - "Downloading {{.name}} {{.version}}": "", "ERROR creating `registry-creds-acr` secret": "", "ERROR creating `registry-creds-dpr` secret": "", "ERROR creating `registry-creds-ecr` secret: {{.error}}": "", @@ -113,7 +110,6 @@ "Enable addons. see `minikube addons list` for a list of valid addon names.": "", "Enable experimental NVIDIA GPU support in minikube": "minikube での試験運用版 NVIDIA GPU の対応を有効にします", "Enable host resolver for NAT DNS requests (virtualbox driver only)": "NAT DNS リクエスト用のホストリゾルバを有効にします(virtualbox ドライバのみ)", - "Enable istio needs {{.minMem}} MB of memory and {{.minCpus}} CPUs.": "", "Enable proxy for NAT DNS requests (virtualbox driver only)": "NAT DNS リクエスト用のプロキシを有効にします(virtualbox ドライバのみ)", "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\": "デフォルトの CNI プラグイン(/etc/cni/net.d/k8s.conf)を有効にします。\\\"--network-plugin=cni\\\" と組み合わせて使用されます。", "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). 
Used in conjunction with \\\"--network-plugin=cni\\\".": "", @@ -135,45 +131,29 @@ "Error finding port for mount": "", "Error generating set output": "", "Error generating unset output": "", - "Error getting IP": "", - "Error getting client": "", - "Error getting client: {{.error}}": "", - "Error getting cluster": "", "Error getting cluster bootstrapper": "", "Error getting cluster config": "", - "Error getting config": "", - "Error getting control plane": "", "Error getting host": "", - "Error getting host IP": "", - "Error getting host status": "", - "Error getting machine logs": "", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "", "Error getting primary control plane": "", - "Error getting primary cp": "", - "Error getting service status": "", "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "", "Error getting ssh client": "", "Error getting the host IP address to use from within the VM": "", - "Error host driver ip status": "", "Error killing mount process": "", - "Error loading api": "", - "Error loading profile config": "", "Error loading profile config: {{.error}}": "", "Error loading profile {{.name}}: {{.error}}": "プロファイル {{.name}} の読み込み中にエラーが発生しました。{{.error}}", "Error opening service": "", "Error parsing Driver version: {{.error}}": "Driver バージョンの解析中にエラーが発生しました。{{.error}}", "Error parsing minikube version: {{.error}}": "minikube バージョンの解析中にエラーが発生しました。{{.error}}", "Error reading {{.path}}: {{.error}}": "", - "Error retrieving node": "", "Error starting cluster": "", "Error starting mount": "", - "Error starting node": "", "Error while setting kubectl current context : {{.error}}": "", "Error writing mount pid": "", - "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. 
Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}\"": "", "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "エラー: Kubernetes v{{.new}} が選択されましたが、使用しているプロファイルの既存クラスタで実行されているのは Kubernetes v{{.old}} です。非破壊的なダウングレードはサポートされていませんが、以下のいずれかの方法で続行できます。\n* Kubernetes v{{.new}} を使用してクラスタを再作成する: 「minikube delete {{.profile}}」を実行してから、「minikube start {{.profile}} --kubernetes-version={{.new}}」を実行します。\n* Kubernetes v{{.new}} を使用して 2 つ目のクラスタを作成する: 「minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}」を実行します。\n* Kubernetes v{{.old}} 以降を使用して既存のクラスタを再利用する: 「minikube start {{.profile}} --kubernetes-version={{.old}}」を実行します。", "Error: [{{.id}}] {{.error}}": "", "Examples": "", + "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "", "Exiting": "終了しています", "Exiting.": "終了しています。", "External Adapter on which external switch will be created if no external switch is found. 
(hyperv driver only)": "", @@ -181,38 +161,38 @@ "Failed to cache ISO": "", "Failed to cache and load images": "", "Failed to cache binaries": "", + "Failed to cache images": "", "Failed to cache images to tar": "", "Failed to cache kubectl": "", "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}": "{{.minikube_dir_path}} に対する権限を変更できませんでした。{{.error}}", - "Failed to check if machine exists": "", "Failed to check main repository and mirrors for images for images": "", "Failed to delete cluster: {{.error}}": "クラスタを削除できませんでした。{{.error}}", "Failed to delete cluster: {{.error}}__1": "クラスタを削除できませんでした。{{.error}}", "Failed to delete images": "", "Failed to delete images from config": "", - "Failed to delete node {{.name}}": "", "Failed to enable container runtime": "", "Failed to generate config": "", + "Failed to get API Server URL": "", "Failed to get bootstrapper": "", "Failed to get command runner": "", - "Failed to get driver URL": "", "Failed to get image map": "", "Failed to get machine client": "", "Failed to get service URL: {{.error}}": "", "Failed to kill mount process: {{.error}}": "マウント プロセスを強制終了できませんでした。{{.error}}", "Failed to list cached images": "", + "Failed to parse kubernetes version": "", "Failed to reload cached images": "", "Failed to save config": "", "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}": "NO_PROXY 環境変数を設定できませんでした。「export NO_PROXY=$NO_PROXY,{{.ip}}」を使用してください。", "Failed to set NO_PROXY Env. 
Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "", "Failed to setup certs": "", "Failed to setup kubeconfig": "", - "Failed to start node {{.name}}": "", "Failed to stop node {{.name}}": "", "Failed to update cluster": "", "Failed to update config": "", "Failed unmount: {{.error}}": "", "File permissions used for the mount": "", + "Filter to use only VM Drivers": "", "Flags": "", "Follow": "", "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "最適な結果を得るには、kubectl を次のサイト https://kubernetes.io/docs/tasks/tools/install-kubectl/ からインストールしてください", @@ -222,13 +202,16 @@ "Force minikube to perform possibly dangerous operations": "minikube で危険な可能性のある操作を強制的に実行します", "Found network options:": "ネットワーク オプションが見つかりました。", "Found {{.number}} invalid profile(s) !": "", + "Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "", + "Generate unable to parse memory '{{.memory}}': {{.error}}": "", "Gets the kubernetes URL(s) for the specified service in your local cluster": "", "Gets the kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time.": "", "Gets the logs of the running instance, used for debugging minikube, not user code.": "", "Gets the status of a local kubernetes cluster": "", "Gets the status of a local kubernetes cluster.\n\tExit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left.\n\tEg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK)": "", "Gets the value of PROPERTY_NAME from the minikube config file": "", - "Getting machine config failed": "", + "Getting bootstrapper": "", + "Getting primary control plane": "", "Global Flags": "", "Go template format string for the cache list output. 
The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate": "", "Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "", @@ -239,6 +222,7 @@ "Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "", + "If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "", "If set, install addons. Defaults to true.": "", "If set, pause all namespaces": "", "If set, unpause all namespaces": "", @@ -255,8 +239,9 @@ "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.": "Docker デーモンに渡す Docker レジストリが安全ではありません。デフォルトのサービス CIDR 範囲が自動的に追加されます。", "Install VirtualBox, or select an alternative value for --driver": "", "Install the latest hyperkit binary, and run 'minikube delete'": "", - "Invalid size passed in argument: {{.error}}": "", "IsEnabled failed": "", + "Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "", + "Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "", "Kill the mount process spawned by minikube start": "", "Kubernetes {{.new}} is now available. 
If you would like to upgrade, specify: --kubernetes-version={{.new}}": "", "Kubernetes {{.version}} is not supported by this release of minikube": "", @@ -271,7 +256,7 @@ "Local folders to share with Guest via NFS mounts (hyperkit driver only)": "NFS マウントを介してゲストと共有するローカル フォルダ(hyperkit ドライバのみ)", "Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "ネットワーキングに使用する VPNKit ソケットのロケーション。空の場合、Hyperkit VPNKitSock が無効になり、「auto」の場合、Mac VPNKit 接続に Docker が使用され、それ以外の場合、指定された VSock が使用されます(hyperkit ドライバのみ)", "Location of the minikube iso": "minikube iso のロケーション", - "Location of the minikube iso.": "", + "Locations to fetch the minikube ISO from.": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "", "Message Size: {{.size}}": "", @@ -289,15 +274,18 @@ "NOTE: This process must stay alive for the mount to be accessible ...": "", "Networking and Connectivity Commands:": "", "No minikube profile was found. 
You can create one using `minikube start`.": "", + "Node \"{{.node_name}}\" stopped.": "", "Node may be unable to resolve external DNS records": "", "Node operations": "", "Node {{.name}} was successfully deleted.": "", + "Node {{.nodeName}} does not exist.": "", + "Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "", "None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "使用しているロケーション内で既知のいずれのリポジトリにもアクセスできません。フォールバックとして {{.image_repository_name}} を使用します。", "None of the known repositories is accessible. 
Consider specifying an alternative image repository with --image-repository flag": "既知のいずれのリポジトリにもアクセスできません。--image-repository フラグとともに代替のイメージ リポジトリを指定することを検討してください。", "Not passing {{.name}}={{.value}} to docker env.": "", - "Noticed that you are using minikube docker-env:": "", + "Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "", + "Number of CPUs allocated to Kubernetes.": "", "Number of CPUs allocated to the minikube VM": "minikube VM に割り当てられた CPU の数", - "Number of CPUs allocated to the minikube VM.": "", "Number of lines back to go within the log": "", "OS release is {{.pretty_name}}": "", "Open the addons URL with https instead of http": "", @@ -318,6 +306,7 @@ "Please install the minikube hyperkit VM driver, or select an alternative --driver": "", "Please install the minikube kvm2 VM driver, or select an alternative --driver": "", "Please make sure the service you are looking for is deployed or is in the correct namespace.": "", + "Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "", "Please upgrade the '{{.driver_executable}}'. {{.documentation_url}}": "「{{.driver_executable}}」をアップグレードしてください。{{.documentation_url}}", "Populates the specified folder with documentation in markdown about minikube": "", @@ -331,10 +320,10 @@ "Profile gets or sets the current minikube profile": "", "Profile name \"{{.profilename}}\" is minikube keyword. 
To delete profile use command minikube delete -p \u003cprofile name\u003e": "", "Provide VM UUID to restore MAC address (hyperkit driver only)": "MAC アドレスを復元するための VM UUID を指定します(hyperkit ドライバのみ)", + "Pulling base image ...": "", "Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "", "Rebuild libvirt with virt-network support": "", "Received {{.name}} signal": "", - "Reconfiguring existing host ...": "", "Registry mirrors to pass to the Docker daemon": "Docker デーモンに渡すレジストリ ミラー", "Reinstall VirtualBox and reboot. Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/": "", "Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "", @@ -345,7 +334,10 @@ "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "", "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "リクエストされたディスクサイズ {{.requested_size}} が最小値 {{.minimum_size}} 未満です", "Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "リクエストされたメモリ割り当て({{.memory}} MB)がデフォルトのメモリ割り当て {{.default_memorysize}} MB 未満です。minikube が正常に動作しないか、予期せずクラッシュする可能性があることに注意してください。", + "Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. 
Kubernetes may crash unexpectedly.": "", "Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "リクエストされたメモリ割り当て {{.requested_size}} が許可される最小値 {{.minimum_size}} 未満です", + "Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "", + "Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "", "Retrieve the ssh identity key path of the specified cluster": "", "Retrieve the ssh identity key path of the specified cluster.": "", "Retrieves the IP address of the running cluster": "", @@ -358,8 +350,10 @@ "Run minikube from the C: drive.": "", "Run the kubernetes client, download it if necessary. Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "", "Run the minikube command as an Administrator": "", + "Run: \"{{.delete}}\", then \"{{.start}} --alsologtostderr -v=1\" to try again with more logging": "", "Run: 'chmod 600 $HOME/.kube/config'": "", "Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", + "Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. 
Or list out all the services using 'minikube service list'": "", "Set failed": "", "Set flag to delete all profiles": "", "Set this flag to delete the '.minikube' folder from your user directory.": "", @@ -385,8 +379,8 @@ "Specify the 9p version that the mount should use": "", "Specify the ip that the mount should be setup on": "", "Specify the mount filesystem type (supported types: 9p)": "", - "Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "", - "Starting node": "", + "StartHost failed again: {{.error}}": "", + "StartHost failed, but will try again: {{.error}}": "", "Starting tunnel for service {{.service}}.": "", "Starts a local kubernetes cluster": "ローカルの Kubernetes クラスタを起動します", "Starts a node.": "", @@ -399,7 +393,6 @@ "Successfully added {{.name}} to {{.cluster}}!": "", "Successfully deleted all profiles": "", "Successfully mounted {{.sourcePath}} to {{.destinationPath}}": "", - "Successfully powered off Hyper-V. minikube driver -- {{.driver}}": "", "Successfully purged minikube directory located at - [{{.minikubeDirectory}}]": "", "Suggestion: {{.advice}}": "", "Suggestion: {{.fix}}": "", @@ -431,12 +424,16 @@ "The cluster dns domain name used in the kubernetes cluster": "Kubernetes クラスタで使用されるクラスタ DNS ドメイン名", "The container runtime to be used (docker, crio, containerd)": "使用されるコンテナ ランタイム(docker、crio、containerd)", "The container runtime to be used (docker, crio, containerd).": "", + "The control plane for \"{{.name}}\" is paused!": "", + "The control plane node \"{{.name}}\" does not exist.": "", + "The control plane node is not running (state={{.state}})": "", + "The control plane node must be running for this command": "", "The cri socket path to be used": "使用される CRI ソケットパス", "The cri socket path to be used.": "", - "The docker service within '{{.profile}}' is not active": "", + "The docker service within '{{.name}}' is not active": "", + "The docker-env command is only compatible with the \"docker\" runtime, but this cluster was 
configured to use the \"{{.runtime}}\" runtime.": "", "The driver '{{.driver}}' is not supported on {{.os}}": "ドライバ「{{.driver}}」は、{{.os}} ではサポートされていません", - "The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "", - "The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the \"{{.driver}}\" driver.": "", + "The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "", "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "hyperv 仮想スイッチ名。最初に見つかったものにデフォルト設定されます(hyperv ドライバのみ)", "The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "", "The initial time interval for each check that wait performs in seconds": "", @@ -448,10 +445,14 @@ "The name of the node to delete": "", "The name of the node to start": "", "The node to get logs from. Defaults to the primary control plane.": "", + "The node to ssh into. Defaults to the primary control plane.": "", + "The none driver is not compatible with multi-node clusters.": "", + "The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "", "The number of bytes to use for 9p packet payload": "", + "The number of nodes to spin up. Defaults to 1.": "", "The output format. 
One of 'json', 'table'": "", "The path on the file system where the docs in markdown need to be saved": "", - "The podman service within '{{.profile}}' is not active": "", + "The podman service within '{{.cluster}}' is not active": "", "The service namespace": "", "The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "", "The services namespace": "", @@ -460,19 +461,24 @@ "The value passed to --format is invalid: {{.error}}": "", "The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "", "The {{.driver_name}} driver should not be used with root privileges.": "{{.driver_name}} ドライバをルート権限で使用しないでください。", + "There is no local cluster named \"{{.cluster}}\"": "", "There's a new version for '{{.driver_executable}}'. Please consider upgrading. {{.documentation_url}}": "「{{.driver_executable}}」の新しいバージョンがあります。アップグレードを検討してください。{{.documentation_url}}", "These changes will take effect upon a minikube delete and then a minikube start": "", "This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "", "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "これは環境変数 CHANGE_MINIKUBE_NONE_USER=true を設定して自動的に行うこともできます", + "This control plane is not running! 
(state={{.state}})": "", + "This is unusual - you may want to investigate using \"{{.command}}\"": "", "This will keep the existing kubectl context and will create a minikube context.": "これにより既存の kubectl コンテキストが保持され、minikube コンテキストが作成されます。", "This will start the mount daemon and automatically mount files into minikube": "これによりマウント デーモンが起動し、ファイルが minikube に自動的にマウントされます", "This will start the mount daemon and automatically mount files into minikube.": "", + "Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "", "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "ヒント: この root 所有のクラスタを削除するには、「sudo {{.cmd}} delete」を実行します", "To connect to this cluster, use: kubectl --context={{.name}}": "このクラスタに接続するには、「kubectl --context={{.name}}」を使用します", "To connect to this cluster, use: kubectl --context={{.name}}__1": "このクラスタに接続するには、「kubectl --context={{.name}}」を使用します", "To connect to this cluster, use: kubectl --context={{.profile_name}}": "", "To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "", - "To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "", + "To fix this, run: {{.command}}": "", + "To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "", "To see addons list for other profiles use: `minikube addons -p name list`": "", "To start minikube with HyperV Powershell must be in your PATH`": "", "To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "kubectl か minikube コマンドを独自のユーザーとして使用するには、そのコマンドの再配置が必要な場合があります。たとえば、独自の設定を上書きするには、以下を実行します。", @@ -482,24 +488,31 @@ "Unable to determine a default driver to use. 
Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "Unable to enable dashboard": "", "Unable to fetch latest version info": "", + "Unable to find control plane": "", "Unable to generate docs": "", "Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", "Unable to get VM IP address": "", "Unable to get addon status for {{.name}}: {{.error}}": "", "Unable to get bootstrapper: {{.error}}": "ブートストラッパを取得できません。{{.error}}", + "Unable to get command runner": "", + "Unable to get control plane status: {{.error}}": "", "Unable to get current user": "", + "Unable to get driver IP": "", + "Unable to get machine status": "", "Unable to get runtime": "", - "Unable to get the status of the {{.name}} cluster.": "", "Unable to kill mount process: {{.error}}": "", "Unable to load cached images from config file.": "キャッシュに保存されているイメージを構成ファイルから読み込むことができません。", "Unable to load cached images: {{.error}}": "", "Unable to load config: {{.error}}": "構成を読み込むことができません。{{.error}}", + "Unable to load host": "", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "「{{.kubernetes_version}}」を解析できません。{{.error}}", "Unable to parse default Kubernetes version from constants: {{.error}}": "", + "Unable to parse memory '{{.memory}}': {{.error}}": "", "Unable to parse oldest Kubernetes version from constants: {{.error}}": "", "Unable to pull images, which may be OK: {{.error}}": "イメージを pull できませんが、問題ありません。{{.error}}", - "Unable to remove machine directory: %v": "", - "Unable to start VM. Please investigate and run 'minikube delete' if possible": "", + "Unable to remove machine directory": "", + "Unable to restart cluster, will reset it: {{.error}}": "", + "Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "", "Unable to stop VM": "", "Unable to update {{.driver}} driver: {{.error}}": "", "Unable to verify SSH connectivity: {{.error}}. 
Will retry...": "", @@ -510,6 +523,8 @@ "Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "", "Unset variables instead of setting them": "", "Update server returned an empty list": "", + "Updating node": "", + "Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "", "Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "", "Upgrading from Kubernetes {{.old}} to {{.new}}": "Kubernetes を {{.old}} から {{.new}} にアップグレードしています", "Usage": "", @@ -530,11 +545,11 @@ "Userspace file server:": "", "Using image repository {{.name}}": "イメージ リポジトリ {{.name}} を使用しています", "Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "", - "Using the running {{.driver_name}} \"{{.profile_name}}\" VM ...": "", "Using the {{.driver}} driver based on existing profile": "", "Using the {{.driver}} driver based on user configuration": "", "VM driver is one of: %v": "VM ドライバは次のいずれかです。%v", "VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "", + "Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "", "Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "", "Verify the IP address of the running cluster in kubeconfig.": "", "Verifying dashboard health ...": "", @@ -549,11 +564,12 @@ "Wait failed": "", "Wait failed: {{.error}}": "", "Wait until Kubernetes core services are healthy before exiting": "Kubernetes コアサービスが正常になるまで待機してから終了してください", - "Waiting for cluster to come online ...": "", "Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "NFS 共有のルートに指定する場所。デフォルトは /nfsshares(hyperkit ドライバのみ)", "Whether to use external switch over Default Switch if virtual switch not explicitly specified. 
(hyperv driver only)": "", "You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "プロキシを使用しようとしていますが、現在の NO_PROXY 環境に minikube IP({{.ip_address}})は含まれていません。詳細については、{{.documentation_url}} をご覧ください", + "You can also use 'minikube kubectl -- get pods' to invoke a matching version": "", "You can delete them using the following command(s):": "", + "You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "", "You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "ハイパーバイザから「{{.name}}」VM を手動で削除することが必要な可能性があります", "You may need to stop the Hyper-V Manager and run `minikube delete` again.": "", "You must specify a service name": "", @@ -562,45 +578,43 @@ "Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "", "Your minikube config refers to an unsupported driver. 
Erase ~/.minikube, and try again.": "", "Your minikube vm is not running, try minikube start.": "", + "adding node": "", "addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "", "addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "", "addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "", - "api load": "", "bash completion failed": "", "call with cleanup=true to remove old tunnels": "", - "command runner": "", "config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "", "config view failed": "", - "creating api client": "", "dashboard service is not running: {{.error}}": "", + "deleting node": "", "disable failed": "", "dry-run mode. Validates configuration, but does not mutate system state": "", "dry-run validation complete!": "", "enable failed": "", "error creating clientset": "", - "error creating machine client": "", "error getting primary control plane": "", "error getting ssh port": "", "error parsing the input ip address for mount": "", "error starting tunnel": "", "error stopping tunnel": "", "failed to open browser: {{.error}}": "", - "getting config": "", - "getting primary control plane": "", + "generating join token": "", "if true, will embed the certs in kubeconfig.": "", "if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "", + "initialization failed, will try again: {{.error}}": "", + "joining cluster": "", "kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. 
Run lsof -p\u003cport\u003e to find the process and kill it": "", "kubectl and minikube configuration will be stored in {{.home_folder}}": "kubectl と minikube の構成は {{.home_folder}} に保存されます", "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "", "kubectl proxy": "", - "loading config": "", + "libmachine failed": "", "logdir set failed": "", - "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.": "", "max time to wait per Kubernetes core services to be healthy.": "", "minikube addons list --output OUTPUT. json, list": "", "minikube is exiting due to an error. If the above message is not useful, open an issue:": "", "minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.": "", - "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "", + "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "", "minikube profile was successfully set to {{.profile_name}}": "", "minikube status --output OUTPUT. json, text": "", "minikube {{.version}} is available! 
Download it: {{.url}}": "", @@ -609,14 +623,16 @@ "mount failed": "", "namespaces to pause": "", "namespaces to unpause": "", + "none driver does not support multi-node clusters": "", "not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "", "pause containers": "", "profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`": "", - "profile {{.name}} is not running.": "", "reload cached images.": "", "reloads images previously added using the 'cache add' subcommand": "", "retrieving node": "", + "saving node": "", "service {{.namespace_name}}/{{.service_name}} has no node port": "", + "setting up certs": "", "stat failed": "", "status json failure": "", "status text failure": "", @@ -641,16 +657,16 @@ "usage: minikube delete": "", "usage: minikube profile [MINIKUBE_PROFILE_NAME]": "", "zsh completion failed": "", + "{{.cluster}} IP has been updated to point at {{.ip}}": "", + "{{.cluster}} IP was already correctly configured for {{.ip}}": "", + "{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "", "{{.driver}} does not appear to be installed": "", "{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}": "", "{{.extra_option_component_name}}.{{.key}}={{.value}}": "", - "{{.machine}} IP has been updated to point at {{.ip}}": "", - "{{.machine}} IP was already correctly configured for {{.ip}}": "", - "{{.name}} cluster does not exist": "", "{{.name}} has no available configuration options": "", "{{.name}} is already running": "", "{{.name}} was successfully configured": "", - "{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. 
You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "", + "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "", "{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.platform}} 上の {{.prefix}}minikube {{.version}}", "{{.type}} is not yet a supported filesystem. We will try anyways!": "", "{{.url}} is not accessible: {{.error}}": "" diff --git a/translations/ko.json b/translations/ko.json index 784b8736a1..9fdde65376 100644 --- a/translations/ko.json +++ b/translations/ko.json @@ -1,5 +1,6 @@ { "\"The '{{.minikube_addon}}' addon is disabled": "\"The '{{.minikube_addon}}' 이 비활성화되었습니다", + "\"{{.machineName}}\" does not exist, nothing to stop": "", "\"{{.name}}\" profile does not exist": "\"{{.name}}\" 프로필이 존재하지 않습니다", "\"{{.name}}\" profile does not exist, trying anyways.": "\"{{.name}}\" 프로필이 존재하지 않습니다, 그럼에도 불구하고 시도합니다", "\"{{.node_name}}\" stopped.": "\"{{.node_name}}\" 이 중단되었습니다", @@ -29,11 +30,11 @@ "Adds a node to the given cluster config, and starts it.": "노드 하나를 주어진 클러스터 컨피그에 추가하고 시작합니다", "Adds a node to the given cluster.": "노드 하나를 주어진 클러스터에 추가합니다", "Advanced Commands:": "고급 커맨드:", - "After minikube restart the dockerd ports might have changed. To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Aliases": "", "Allow user prompts for more information": "많은 정보를 위해 사용자 프롬프트를 허가합니다", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. 
For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "", - "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "minikube 가상 머신에 할당할 RAM 의 용량 (format: [], where unit = b, k, m or g)", + "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "minikube 가상 머신에 할당할 RAM 의 용량 (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)", + "Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", "Amount of time to wait for a service in seconds": "", "Amount of time to wait for service in seconds": "", "Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "VirtualBox 와 같은 또 다른 하이퍼바이저가 KVM 과 충돌이 발생합니다. 다른 하이퍼바이저를 중단하거나 --driver 로 변경하세요", @@ -44,6 +45,7 @@ "Because you are using docker driver on Mac, the terminal needs to be open to run it.": "", "Bind Address: {{.Address}}": "", "Block until the apiserver is servicing API requests": "apiserver 가 API 요청을 서비스할 때까지 막습니다", + "Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "", "Cannot find directory {{.path}} for mount": "마운트하기 위한 디렉토리 {{.path}} 를 찾을 수 없습니다", "Cannot use both --output and --format options": "--output 과 --format 옵션을 함께 사용할 수 없습니다", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "", @@ -62,9 +64,10 @@ "Could not process error from failed deletion": "", "Could not process errors from failed deletion": "", "Country code of the image mirror to be used. 
Leave empty to use the global one. For Chinese mainland users, set it to cn.": "", - "Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", + "Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", "Creating mount {{.name}} ...": "", "Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "{{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) 를 생성하는 중 ...", + "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", "DEPRECATED, use `driver` instead.": "DEPRECATED 되었습니다, 'driver' 를 사용하세요", "Default group id used for the mount": "마운트를 위한 디폴트 group id", "Default user id used for the mount": "마운트를 위한 디폴트 user id", @@ -89,9 +92,9 @@ "Documentation: {{.url}}": "문서: {{.url}}", "Done! kubectl is now configured to use \"{{.name}}\"": "끝났습니다! 이제 kubectl 이 \"{{.name}}\" 를 사용할 수 있도록 설정되었습니다", "Download complete!": "다운로드가 성공하였습니다!", + "Downloading Kubernetes {{.version}} preload ...": "", "Downloading VM boot image ...": "가상 머신 부트 이미지 다운로드 중 ...", "Downloading driver {{.driver}}:": "드라이버 {{.driver}} 다운로드 중 :", - "Downloading preloaded images tarball for k8s {{.version}} ...": "", "Downloading {{.name}} {{.version}}": "{{.name}} {{.version}} 다운로드 중", "ERROR creating `registry-creds-acr` secret": "registry-creds-acr` secret 생성 오류", "ERROR creating `registry-creds-dpr` secret": "`registry-creds-dpr` secret 생성 오류", @@ -101,7 +104,6 @@ "Enable addons. 
see `minikube addons list` for a list of valid addon names.": "", "Enable experimental NVIDIA GPU support in minikube": "", "Enable host resolver for NAT DNS requests (virtualbox driver only)": "", - "Enable istio needs {{.minMem}} MB of memory and {{.minCpus}} CPUs.": "", "Enable proxy for NAT DNS requests (virtualbox driver only)": "", "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\\".": "", "Enables the addon w/ADDON_NAME within minikube (example: minikube addons enable dashboard). For a list of available addons use: minikube addons list": "", @@ -128,19 +130,16 @@ "Error getting cluster bootstrapper": "클러스터 부트스트래퍼 조회 오류", "Error getting cluster config": "클러스터 컨피그 조회 오류", "Error getting config": "컨피그 조회 오류", - "Error getting control plane": "", "Error getting host": "호스트 조회 오류", "Error getting host IP": "호스트 IP 조회 오류", "Error getting host status": "호스트 상태 조회 오류", "Error getting machine logs": "머신 로그 조회 오류", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "", "Error getting primary control plane": "", - "Error getting primary cp": "", "Error getting service status": "서비스 상태 조회 오류", "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "", "Error getting ssh client": "ssh 클라이언트 조회 오류", "Error getting the host IP address to use from within the VM": "", - "Error host driver ip status": "", "Error killing mount process": "", "Error loading api": "api 로딩 오류", "Error loading profile config": "프로필 컨피그 로딩 오류", @@ -148,15 +147,14 @@ "Error opening service": "", "Error parsing minikube version: {{.error}}": "minikube 버전 파싱 오류: {{.error}}", "Error reading {{.path}}: {{.error}}": "", - "Error retrieving node": "", "Error starting cluster": "클러스터 시작 오류", "Error starting mount": "마운트 시작 오류", "Error starting node": "노드 시작 오류", "Error while setting kubectl current context : {{.error}}": "kubectl current context 설정 오류 : {{.error}}", "Error 
writing mount pid": "", - "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}\"": "", "Error: [{{.id}}] {{.error}}": "", "Examples": "예시", + "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "", "Exiting": "", "Exiting.": "", "External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)": "", @@ -164,6 +162,7 @@ "Failed to cache ISO": "ISO 캐싱에 실패하였습니다", "Failed to cache and load images": "이미지 캐싱 및 로딩에 실패하였습니다", "Failed to cache binaries": "바이너리 캐싱에 실패하였습니다", + "Failed to cache images": "", "Failed to cache images to tar": "이미지를 tar 로 캐싱하는 데 실패하였습니다", "Failed to cache kubectl": "kubectl 캐싱에 실패하였습니다", "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}": "{{.minikube_dir_path}} 의 권한 변경에 실패하였습니다: {{.error}}", @@ -175,6 +174,7 @@ "Failed to delete node {{.name}}": "노드 {{.name}} 제거에 실패하였습니다", "Failed to enable container runtime": "컨테이너 런타임 활성화에 실패하였습니다", "Failed to generate config": "컨피그 생성에 실패하였습니다", + "Failed to get API Server URL": "", "Failed to get bootstrapper": "부트스트래퍼 조회에 실패하였습니다", "Failed to get command runner": "", "Failed to get driver URL": "드라이버 URL 조회에 실패하였습니다", @@ -183,6 +183,7 @@ "Failed to get service URL: {{.error}}": "서비스 URL 조회에 실패하였습니다: {{.error}}", "Failed to kill mount process: {{.error}}": "마운트 프로세스 중지에 실패하였습니다: {{.error}}", "Failed to list cached 
images": "캐시된 이미지를 조회하는 데 실패하였습니다", + "Failed to parse kubernetes version": "", "Failed to reload cached images": "캐시된 이미지를 다시 불러오는 데 실패하였습니다", "Failed to save config": "컨피그 저장에 실패하였습니다", "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "", @@ -194,6 +195,7 @@ "Failed to update config": "컨피그를 수정하는 데 실패하였습니다", "Failed unmount: {{.error}}": "마운트 해제에 실패하였습니다: {{.error}}", "File permissions used for the mount": "", + "Filter to use only VM Drivers": "", "Flags": "", "Follow": "", "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "", @@ -202,13 +204,17 @@ "Force minikube to perform possibly dangerous operations": "", "Found network options:": "네트워크 옵션을 찾았습니다", "Found {{.number}} invalid profile(s) !": "{{.number}} 개의 무효한 프로필을 찾았습니다", + "Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "", + "Generate unable to parse memory '{{.memory}}': {{.error}}": "", "Gets the kubernetes URL(s) for the specified service in your local cluster": "", "Gets the kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time.": "", "Gets the logs of the running instance, used for debugging minikube, not user code.": "", "Gets the status of a local kubernetes cluster": "", "Gets the status of a local kubernetes cluster.\n\tExit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left.\n\tEg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK)": "", "Gets the value of PROPERTY_NAME from the minikube config file": "", + "Getting bootstrapper": "", "Getting machine config failed": "머신 컨피그 조회 실패", + "Getting primary control plane": "", "Global Flags": "", "Go template format string for the cache list output. 
The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate": "", "Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "", @@ -219,6 +225,7 @@ "Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "", + "If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "", "If set, install addons. Defaults to true.": "", "If set, pause all namespaces": "", "If set, unpause all namespaces": "", @@ -233,8 +240,9 @@ "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.": "", "Install VirtualBox, or select an alternative value for --driver": "", "Install the latest hyperkit binary, and run 'minikube delete'": "", - "Invalid size passed in argument: {{.error}}": "", "IsEnabled failed": "", + "Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "", + "Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "", "Kill the mount process spawned by minikube start": "", "Kubernetes {{.new}} is now available. 
If you would like to upgrade, specify: --kubernetes-version={{.new}}": "", "Kubernetes {{.version}} is not supported by this release of minikube": "", @@ -248,7 +256,7 @@ "Lists the URLs for the services in your local cluster": "", "Local folders to share with Guest via NFS mounts (hyperkit driver only)": "", "Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "", - "Location of the minikube iso.": "", + "Locations to fetch the minikube ISO from.": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "", "Message Size: {{.size}}": "", @@ -266,14 +274,17 @@ "NOTE: This process must stay alive for the mount to be accessible ...": "", "Networking and Connectivity Commands:": "", "No minikube profile was found. You can create one using `minikube start`.": "", + "Node \"{{.node_name}}\" stopped.": "", "Node may be unable to resolve external DNS records": "", "Node operations": "", "Node {{.name}} was successfully deleted.": "", + "Node {{.nodeName}} does not exist.": "", + "Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "", "None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "", "None of the known repositories is accessible. 
Consider specifying an alternative image repository with --image-repository flag": "", "Not passing {{.name}}={{.value}} to docker env.": "", - "Noticed that you are using minikube docker-env:": "", - "Number of CPUs allocated to the minikube VM.": "", + "Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "", + "Number of CPUs allocated to Kubernetes.": "", "Number of lines back to go within the log": "", "OS release is {{.pretty_name}}": "", "Open the addons URL with https instead of http": "", @@ -294,6 +305,7 @@ "Please install the minikube hyperkit VM driver, or select an alternative --driver": "", "Please install the minikube kvm2 VM driver, or select an alternative --driver": "", "Please make sure the service you are looking for is deployed or is in the correct namespace.": "", + "Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "", "Populates the specified folder with documentation in markdown about minikube": "", "Powering off \"{{.profile_name}}\" via SSH ...": "", @@ -306,10 +318,10 @@ "Profile gets or sets the current minikube profile": "", "Profile name \"{{.profilename}}\" is minikube keyword. To delete profile use command minikube delete -p \u003cprofile name\u003e": "", "Provide VM UUID to restore MAC address (hyperkit driver only)": "", + "Pulling base image ...": "", "Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "", "Rebuild libvirt with virt-network support": "", "Received {{.name}} signal": "", - "Reconfiguring existing host ...": "", "Registry mirrors to pass to the Docker daemon": "", "Reinstall VirtualBox and reboot. 
Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/": "", "Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "", @@ -318,8 +330,9 @@ "Removing {{.directory}} ...": "", "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "", "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "", - "Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "", - "Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "", + "Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. Kubernetes may crash unexpectedly.": "", + "Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "", + "Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "", "Retrieve the ssh identity key path of the specified cluster": "", "Retrieve the ssh identity key path of the specified cluster.": "", "Retrieves the IP address of the running cluster": "", @@ -332,8 +345,10 @@ "Run minikube from the C: drive.": "", "Run the kubernetes client, download it if necessary. 
Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "", "Run the minikube command as an Administrator": "minikube 명령어를 관리자 권한으로 실행합니다", + "Run: \"{{.delete}}\", then \"{{.start}} --alsologtostderr -v=1\" to try again with more logging": "", "Run: 'chmod 600 $HOME/.kube/config'": "", "Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", + "Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. Or list out all the services using 'minikube service list'": "", "Set failed": "설정이 실패하였습니다", "Set flag to delete all profiles": "", "Set this flag to delete the '.minikube' folder from your user directory.": "", @@ -359,7 +374,8 @@ "Specify the 9p version that the mount should use": "", "Specify the ip that the mount should be setup on": "", "Specify the mount filesystem type (supported types: 9p)": "", - "Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "", + "StartHost failed again: {{.error}}": "", + "StartHost failed, but will try again: {{.error}}": "", "Starting node": "노드를 시작하는 중", "Starting tunnel for service {{.service}}.": "", "Starts a local kubernetes cluster": "로컬 쿠버네티스 클러스터를 시작합니다", @@ -373,7 +389,6 @@ "Successfully added {{.name}} to {{.cluster}}!": "{{.name}} 를 {{.cluster}} 에 성공적으로 추가하였습니다!", "Successfully deleted all profiles": "모든 프로필이 성공적으로 삭제되었습니다", "Successfully mounted {{.sourcePath}} to {{.destinationPath}}": "", - "Successfully powered off Hyper-V. 
minikube driver -- {{.driver}}": "", "Successfully purged minikube directory located at - [{{.minikubeDirectory}}]": "", "Suggestion: {{.advice}}": "", "Suggestion: {{.fix}}": "", @@ -399,24 +414,32 @@ "The argument to pass the minikube mount command on start.": "", "The cluster dns domain name used in the kubernetes cluster": "", "The container runtime to be used (docker, crio, containerd).": "", + "The control plane for \"{{.name}}\" is paused!": "", + "The control plane node \"{{.name}}\" does not exist.": "", + "The control plane node is not running (state={{.state}})": "", + "The control plane node must be running for this command": "", "The cri socket path to be used.": "", - "The docker service within '{{.profile}}' is not active": "", - "The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "", - "The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the \"{{.driver}}\" driver.": "", + "The docker service within '{{.name}}' is not active": "", + "The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "", + "The driver '{{.driver}}' is not supported on {{.os}}": "", + "The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "", "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "", "The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "", "The initial time interval for each check that wait performs in seconds": "", - "The kubernetes version that the minikube VM will use (ex: v1.2.3)": "", "The machine-driver specified is failing to start. Try running 'docker-machine-driver-\u003ctype\u003e version'": "", "The minikube VM is offline. 
Please run 'minikube start' to start it again.": "", "The name of the network plugin.": "", "The name of the node to delete": "", "The name of the node to start": "", "The node to get logs from. Defaults to the primary control plane.": "", + "The node to ssh into. Defaults to the primary control plane.": "", + "The none driver is not compatible with multi-node clusters.": "", + "The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "", "The number of bytes to use for 9p packet payload": "", + "The number of nodes to spin up. Defaults to 1.": "", "The output format. One of 'json', 'table'": "", "The path on the file system where the docs in markdown need to be saved": "", - "The podman service within '{{.profile}}' is not active": "", + "The podman service within '{{.cluster}}' is not active": "", "The service namespace": "", "The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "", "The services namespace": "", @@ -424,16 +447,20 @@ "The value passed to --format is invalid": "", "The value passed to --format is invalid: {{.error}}": "", "The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "", + "There is no local cluster named \"{{.cluster}}\"": "", "These changes will take effect upon a minikube delete and then a minikube start": "", "This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "", "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "", + "This control plane is not running! 
(state={{.state}})": "", + "This is unusual - you may want to investigate using \"{{.command}}\"": "", "This will keep the existing kubectl context and will create a minikube context.": "", "This will start the mount daemon and automatically mount files into minikube.": "", - "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "", + "Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "", "To connect to this cluster, use: kubectl --context={{.name}}": "", "To connect to this cluster, use: kubectl --context={{.profile_name}}": "", "To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "", - "To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "", + "To fix this, run: {{.command}}": "", + "To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "", "To see addons list for other profiles use: `minikube addons -p name list`": "", "To start minikube with HyperV Powershell must be in your PATH`": "", "To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "", @@ -443,21 +470,31 @@ "Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "Unable to enable dashboard": "대시보드를 활성화할 수 없습니다", "Unable to fetch latest version info": "최신 버전 정보를 가져올 수 없습니다", + "Unable to find control plane": "", "Unable to generate docs": "문서를 생성할 수 없습니다", "Unable to generate the documentation. 
Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", "Unable to get VM IP address": "가상 머신 IP 주소를 조회할 수 없습니다", "Unable to get addon status for {{.name}}: {{.error}}": "", + "Unable to get command runner": "", + "Unable to get control plane status: {{.error}}": "", "Unable to get current user": "현재 사용자를 조회할 수 없습니다", + "Unable to get driver IP": "", + "Unable to get machine status": "", "Unable to get runtime": "런타임을 조회할 수 없습니다", "Unable to get the status of the {{.name}} cluster.": "{{.name}} 클러스터의 상태를 조회할 수 없습니다", "Unable to kill mount process: {{.error}}": "마운트 프로세스를 중지할 수 없습니다: {{.error}}", "Unable to load cached images from config file.": "컨피그 파일로부터 캐시된 이미지를 로드할 수 없습니다", "Unable to load cached images: {{.error}}": "캐시된 이미지를 로드할 수 없습니다: {{.error}}", "Unable to load config: {{.error}}": "컨피그를 로드할 수 없습니다: {{.error}}", + "Unable to load host": "", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": " \"{{.kubernetes_version}}\" 를 파싱할 수 없습니다: {{.error}}", "Unable to parse default Kubernetes version from constants: {{.error}}": "", + "Unable to parse memory '{{.memory}}': {{.error}}": "", "Unable to parse oldest Kubernetes version from constants: {{.error}}": "", + "Unable to remove machine directory": "", "Unable to remove machine directory: %v": "머신 디렉토리를 제거할 수 없습니다: %v", + "Unable to restart cluster, will reset it: {{.error}}": "", + "Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "", "Unable to start VM. Please investigate and run 'minikube delete' if possible": "가상 머신을 시작할 수 없습니다. 
확인 후 가능하면 'minikube delete' 를 실행하세요", "Unable to stop VM": "가상 머신을 중지할 수 없습니다", "Unable to update {{.driver}} driver: {{.error}}": "{{.driver}} 를 수정할 수 없습니다: {{.error}}", @@ -469,6 +506,8 @@ "Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "", "Unset variables instead of setting them": "", "Update server returned an empty list": "", + "Updating node": "", + "Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "", "Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "", "Usage": "", "Usage: minikube completion SHELL": "", @@ -488,10 +527,10 @@ "Userspace file server:": "", "Using image repository {{.name}}": "", "Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "", - "Using the running {{.driver_name}} \"{{.profile_name}}\" VM ...": "", "Using the {{.driver}} driver based on existing profile": "", "Using the {{.driver}} driver based on user configuration": "", "VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "", + "Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "", "Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "", "Verify the IP address of the running cluster in kubeconfig.": "", "Verifying dashboard health ...": "", @@ -509,7 +548,9 @@ "Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "", "Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)": "", "You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). 
Please see {{.documentation_url}} for more details": "", + "You can also use 'minikube kubectl -- get pods' to invoke a matching version": "", "You can delete them using the following command(s):": "다음 커맨드(들)을 사용하여 제거할 수 있습니다", + "You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "", "You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "", "You may need to stop the Hyper-V Manager and run `minikube delete` again.": "", "You must specify a service name": "service 이름을 명시해야 합니다", @@ -518,17 +559,17 @@ "Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "", "Your minikube config refers to an unsupported driver. Erase ~/.minikube, and try again.": "minikube config 가 미지원 드라이버를 참조하고 있습니다. ~/.minikube 를 제거한 후, 다시 시도하세요", "Your minikube vm is not running, try minikube start.": "minikube 가상 머신이 실행 중이 아닙니다, minikube start 를 시도하세요", + "adding node": "", "addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "", "addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "", "addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "", - "api load": "", "bash completion failed": "bash 자동 완성이 실패하였습니다", "call with cleanup=true to remove old tunnels": "", - "command runner": "", "config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "", "config view failed": "config view 가 실패하였습니다", "creating api client": "api 클라이언트 생성 중", "dashboard service is not running: {{.error}}": "대시보드 서비스가 실행 중이지 않습니다: {{.error}}", + "deleting node": "", "disable failed": "비활성화가 실패하였습니다", "dry-run mode. 
Validates configuration, but does not mutate system state": "", "dry-run validation complete!": "dry-run 검증 완료!", @@ -541,14 +582,17 @@ "error starting tunnel": "", "error stopping tunnel": "", "failed to open browser: {{.error}}": "", + "generating join token": "", "getting config": "컨피그 조회 중", - "getting primary control plane": "", "if true, will embed the certs in kubeconfig.": "", "if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "프로필을 생성하려면 다음 커맨드를 입력하세요: minikube start -p {{.profile_name}}\"", + "initialization failed, will try again: {{.error}}": "", + "joining cluster": "", "kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p\u003cport\u003e to find the process and kill it": "", "kubectl and minikube configuration will be stored in {{.home_folder}}": "kubectl 과 minikube 환경 정보는 {{.home_folder}} 에 저장될 것입니다", "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "kubectl 이 PATH 에 없습니다, 하지만 이는 대시보드에서 필요로 합니다. 설치 가이드:https://kubernetes.io/docs/tasks/tools/install-kubectl/", "kubectl proxy": "kubectl 프록시", + "libmachine failed": "", "loading config": "컨피그 로딩 중", "logdir set failed": "logdir 설정이 실패하였습니다", "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.": "머신 '{{.name}}' 이 존재하지 않습니다. 진행하기 앞서 가상 머신을 재생성합니다", @@ -556,7 +600,7 @@ "minikube addons list --output OUTPUT. json, list": "", "minikube is exiting due to an error. If the above message is not useful, open an issue:": "", "minikube is unable to access the Google Container Registry. 
You may need to configure it to use a HTTP proxy.": "", - "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "", + "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "", "minikube profile was successfully set to {{.profile_name}}": "", "minikube status --output OUTPUT. json, text": "", "minikube {{.version}} is available! Download it: {{.url}}": "", @@ -565,14 +609,16 @@ "mount failed": "", "namespaces to pause": "", "namespaces to unpause": "", + "none driver does not support multi-node clusters": "", "not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "", "pause containers": "", "profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. 
You can return to the default minikube profile by running `minikube profile default`": "", - "profile {{.name}} is not running.": "", "reload cached images.": "", "reloads images previously added using the 'cache add' subcommand": "", "retrieving node": "", + "saving node": "", "service {{.namespace_name}}/{{.service_name}} has no node port": "", + "setting up certs": "", "stat failed": "", "status json failure": "", "status text failure": "", @@ -597,16 +643,17 @@ "usage: minikube delete": "", "usage: minikube profile [MINIKUBE_PROFILE_NAME]": "", "zsh completion failed": "", + "{{.cluster}} IP has been updated to point at {{.ip}}": "", + "{{.cluster}} IP was already correctly configured for {{.ip}}": "", + "{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "", "{{.driver}} does not appear to be installed": "{{.driver}} 가 설치되지 않았습니다", "{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}": "", "{{.extra_option_component_name}}.{{.key}}={{.value}}": "", - "{{.machine}} IP has been updated to point at {{.ip}}": "", - "{{.machine}} IP was already correctly configured for {{.ip}}": "", "{{.name}} cluster does not exist": "{{.name}} 클러스터가 존재하지 않습니다", "{{.name}} has no available configuration options": "{{.driver}} 이 사용 가능한 환경 정보 옵션이 없습니다", "{{.name}} is already running": "{{.driver}} 이 이미 실행 중입니다", "{{.name}} was successfully configured": "{{.driver}} 이 성공적으로 설정되었습니다", - "{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "", + "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "", "{{.prefix}}minikube {{.version}} on {{.platform}}": "", "{{.type}} is not yet a supported filesystem. 
We will try anyways!": "", "{{.url}} is not accessible: {{.error}}": "{{.url}} 이 접근 불가능합니다: {{.error}}" diff --git a/translations/pl.json b/translations/pl.json index 2cfee9621b..0a0d296553 100644 --- a/translations/pl.json +++ b/translations/pl.json @@ -1,11 +1,10 @@ { "\"The '{{.minikube_addon}}' addon is disabled": "", + "\"{{.machineName}}\" does not exist, nothing to stop": "", "\"{{.minikube_addon}}\" was successfully disabled": "\"{{.minikube_addon}}\" został wyłaczony", "\"{{.name}}\" profile does not exist": "Profil \"{{.name}}\" nie istnieje", "\"{{.name}}\" profile does not exist, trying anyways.": "", - "\"{{.node_name}}\" stopped.": "", "\"{{.profile_name}}\" VM does not exist, nothing to stop": "Maszyna wirtualna \"{{.profile_name}}\" nie istnieje. Nie można zatrzymać", - "\"{{.profile_name}}\" does not exist, nothing to stop": "", "\"{{.profile_name}}\" host does not exist, unable to show an IP": "Profil \"{{.profile_name}}\" nie istnieje. Nie można wyświetlić adresu IP ", "\"{{.profile_name}}\" stopped.": "Zatrzymano \"{{.profile_name}}\"", "'none' driver does not support 'minikube docker-env' command": "sterownik 'none' nie wspiera komendy 'minikube docker-env'", @@ -13,7 +12,6 @@ "'none' driver does not support 'minikube podman-env' command": "", "'none' driver does not support 'minikube ssh' command": "sterownik 'none' nie wspiera komendy 'minikube ssh'", "'{{.driver}}' driver reported an issue: {{.error}}": "", - "'{{.profile}}' is not running": "", "- {{.profile}}": "", "A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "", "A firewall is blocking Docker the minikube VM from reaching the internet. 
You may need to configure it to use a proxy.": "", @@ -32,12 +30,12 @@ "Adds a node to the given cluster config, and starts it.": "", "Adds a node to the given cluster.": "", "Advanced Commands:": "Zaawansowane komendy", - "After minikube restart the dockerd ports might have changed. To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Aliases": "Aliasy", "Allow user prompts for more information": "", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "", "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)": "Ilość zarezerwowanej pamieci RAM dla maszyny wirtualnej minikube (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or )", "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "Ilość zarezerwowanej pamieci RAM dla maszyny wirtualnej minikube (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or )", + "Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", "Amount of time to wait for a service in seconds": "Czas oczekiwania na serwis w sekundach", "Amount of time to wait for service in seconds": "Czas oczekiwania na servis w sekundach", "Another hypervisor, such as VirtualBox, is conflicting with KVM. 
Please stop the other hypervisor, or use --driver to switch to it.": "", @@ -48,6 +46,7 @@ "Because you are using docker driver on Mac, the terminal needs to be open to run it.": "", "Bind Address: {{.Address}}": "", "Block until the apiserver is servicing API requests": "", + "Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "", "Cannot find directory {{.path}} for mount": "Nie można odnoleść folderu {{.path}} do zamontowania", "Cannot use both --output and --format options": "", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "", @@ -68,10 +67,11 @@ "Could not process errors from failed deletion": "", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "", "Created a new profile : {{.profile_name}}": "Stworzono nowy profil : {{.profile_name}}", - "Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", + "Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", "Creating a new profile failed": "Tworzenie nowego profilu nie powiodło się", "Creating mount {{.name}} ...": "", "Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "Tworzenie {{.driver_name}} (CPUs={{.number_of_cpus}}, Pamięć={{.memory_size}}MB, Dysk={{.disk_size}}MB)...", + "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", "DEPRECATED, 
use `driver` instead.": "", "Default group id used for the mount": "Domyślne id groupy użyte dla montowania", "Default user id used for the mount": "Domyślne id użytkownia użyte dla montowania ", @@ -98,9 +98,9 @@ "Done! kubectl is now configured to use \"{{.name}}": "Gotowe! kubectl jest skonfigurowany do użycia z \"{{.name}}\".", "Done! kubectl is now configured to use \"{{.name}}\"": "Gotowe! kubectl jest skonfigurowany do użycia z \"{{.name}}\".", "Download complete!": "Pobieranie zakończone!", + "Downloading Kubernetes {{.version}} preload ...": "", "Downloading VM boot image ...": "Pobieranie obrazu maszyny wirtualnej ...", "Downloading driver {{.driver}}:": "", - "Downloading preloaded images tarball for k8s {{.version}} ...": "", "Downloading {{.name}} {{.version}}": "Pobieranie {{.name}} {{.version}}", "ERROR creating `registry-creds-acr` secret": "", "ERROR creating `registry-creds-dpr` secret": "", @@ -110,7 +110,6 @@ "Enable addons. see `minikube addons list` for a list of valid addon names.": "", "Enable experimental NVIDIA GPU support in minikube": "aktywuj eksperymentalne wsparcie minikube dla NVIDIA GPU", "Enable host resolver for NAT DNS requests (virtualbox driver only)": "", - "Enable istio needs {{.minMem}} MB of memory and {{.minCpus}} CPUs.": "", "Enable proxy for NAT DNS requests (virtualbox driver only)": "", "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\\".": "", "Enables the addon w/ADDON_NAME within minikube (example: minikube addons enable dashboard). 
For a list of available addons use: minikube addons list": "", @@ -131,46 +130,30 @@ "Error finding port for mount": "", "Error generating set output": "", "Error generating unset output": "", - "Error getting IP": "", - "Error getting client": "", - "Error getting client: {{.error}}": "", - "Error getting cluster": "", "Error getting cluster bootstrapper": "", "Error getting cluster config": "", - "Error getting config": "", - "Error getting control plane": "", "Error getting host": "", - "Error getting host IP": "", - "Error getting host status": "", - "Error getting machine logs": "", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "", "Error getting primary control plane": "", - "Error getting primary cp": "", - "Error getting service status": "", "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "", "Error getting ssh client": "", "Error getting the host IP address to use from within the VM": "", - "Error host driver ip status": "", "Error killing mount process": "", - "Error loading api": "", - "Error loading profile config": "", "Error loading profile config: {{.error}}": "", "Error opening service": "", "Error parsing Driver version: {{.error}}": "Błąd parsowania wersji Driver: {{.error}}", "Error parsing minikube version: {{.error}}": "Bład parsowania wersji minikube: {{.error}}", "Error reading {{.path}}: {{.error}}": "Błąd odczytu {{.path}} {{.error}}", "Error restarting cluster": "Błąd podczas restartowania klastra", - "Error retrieving node": "", "Error setting shell variables": "Błąd podczas ustawiania zmiennych powłoki(shell)", "Error starting cluster": "Błąd podczas uruchamiania klastra", "Error starting mount": "", - "Error starting node": "", "Error while setting kubectl current context : {{.error}}": "Błąd podczas ustawiania kontekstu kubectl: {{.error}}", "Error writing mount pid": "", - "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for 
your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}\"": "", "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "Erreur : Vous avez sélectionné Kubernetes v{{.new}}, mais le cluster existent pour votre profil exécute Kubernetes v{{.old}}. Les rétrogradations non-destructives ne sont pas compatibles. 
Toutefois, vous pouvez poursuivre le processus en réalisant l'une des trois actions suivantes :\n* Créer à nouveau le cluster en utilisant Kubernetes v{{.new}} – exécutez \"minikube delete {{.profile}}\", puis \"minikube start {{.profile}} --kubernetes-version={{.new}}\".\n* Créer un second cluster avec Kubernetes v{{.new}} – exécutez \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\".\n* Réutiliser le cluster existent avec Kubernetes v{{.old}} ou version ultérieure – exécutez \"minikube start {{.profile}} --kubernetes-version={{.old}}\".", "Error: [{{.id}}] {{.error}}": "", "Examples": "Przykłady", + "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "", "Exiting": "", "Exiting.": "", "External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)": "", @@ -178,38 +161,38 @@ "Failed to cache ISO": "", "Failed to cache and load images": "", "Failed to cache binaries": "", + "Failed to cache images": "", "Failed to cache images to tar": "", "Failed to cache kubectl": "", "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}": "Nie udało się zmienić uprawnień pliku {{.minikube_dir_path}}: {{.error}}", - "Failed to check if machine exists": "", "Failed to check main repository and mirrors for images for images": "", "Failed to delete cluster: {{.error}}": "", "Failed to delete images": "", "Failed to delete images from config": "", - "Failed to delete node {{.name}}": "", "Failed to download kubectl": "Pobieranie kubectl nie powiodło się", "Failed to enable container runtime": "", "Failed to generate config": "", + "Failed to get API Server URL": "", "Failed to get bootstrapper": "", "Failed to get command runner": "", - "Failed to get driver URL": "", "Failed to get image map": "", "Failed to get machine client": "", "Failed to get service URL: {{.error}}": "", "Failed to kill mount process: {{.error}}": "Zabicie procesu nie powiodło się: 
{{.error}}", "Failed to list cached images": "", + "Failed to parse kubernetes version": "", "Failed to reload cached images": "", "Failed to remove profile": "Usunięcie profilu nie powiodło się", "Failed to save config": "Zapisywanie konfiguracji nie powiodło się", "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "", "Failed to setup certs": "Konfiguracja certyfikatów nie powiodła się", "Failed to setup kubeconfig": "Konfiguracja kubeconfig nie powiodła się", - "Failed to start node {{.name}}": "", "Failed to stop node {{.name}}": "", "Failed to update cluster": "Aktualizacja klastra nie powiodła się", "Failed to update config": "Aktualizacja konfiguracji nie powiodła się", "Failed unmount: {{.error}}": "", "File permissions used for the mount": "", + "Filter to use only VM Drivers": "", "Flags": "", "Follow": "", "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "", @@ -218,13 +201,16 @@ "Force minikube to perform possibly dangerous operations": "Wymuś wykonanie potencjalnie niebezpiecznych operacji", "Found network options:": "Wykryto opcje sieciowe:", "Found {{.number}} invalid profile(s) !": "Wykryto {{.number}} nieprawidłowych profili ! ", + "Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "", + "Generate unable to parse memory '{{.memory}}': {{.error}}": "", "Gets the kubernetes URL(s) for the specified service in your local cluster": "", "Gets the kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time.": "", "Gets the logs of the running instance, used for debugging minikube, not user code.": "Pobiera logi z aktualnie uruchomionej instancji. 
Przydatne do debugowania kodu który nie należy do aplikacji użytkownika", "Gets the status of a local kubernetes cluster": "Pobiera aktualny status klastra kubernetesa", "Gets the status of a local kubernetes cluster.\n\tExit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left.\n\tEg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK)": "", "Gets the value of PROPERTY_NAME from the minikube config file": "", - "Getting machine config failed": "", + "Getting bootstrapper": "", + "Getting primary control plane": "", "Global Flags": "", "Go template format string for the cache list output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate": "", "Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "", @@ -235,6 +221,7 @@ "Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "", + "If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "", "If set, install addons. 
Defaults to true.": "", "If set, pause all namespaces": "", "If set, unpause all namespaces": "", @@ -251,6 +238,8 @@ "Install the latest hyperkit binary, and run 'minikube delete'": "", "Invalid size passed in argument: {{.error}}": "Nieprawidłowy rozmiar przekazany w argumencie: {{.error}}", "IsEnabled failed": "", + "Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "", + "Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "", "Kill the mount process spawned by minikube start": "", "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}": "", "Kubernetes {{.version}} is not supported by this release of minikube": "", @@ -266,6 +255,7 @@ "Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "", "Location of the minikube iso": "Ścieżka do obrazu iso minikube", "Location of the minikube iso.": "Ścieżka do obrazu iso minikube", + "Locations to fetch the minikube ISO from.": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "Zaloguj się i wykonaj polecenie w maszynie za pomocą ssh. Podobne do 'docker-machine ssh'", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "Zaloguj się i wykonaj polecenie w maszynie za pomocą ssh. Podobne do 'docker-machine ssh'", "Message Size: {{.size}}": "", @@ -283,13 +273,17 @@ "NOTE: This process must stay alive for the mount to be accessible ...": "", "Networking and Connectivity Commands:": "", "No minikube profile was found. 
You can create one using `minikube start`.": "", + "Node \"{{.node_name}}\" stopped.": "", "Node may be unable to resolve external DNS records": "", "Node operations": "", "Node {{.name}} was successfully deleted.": "", + "Node {{.nodeName}} does not exist.": "", + "Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "", "None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "", "None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag": "", "Not passing {{.name}}={{.value}} to docker env.": "", - "Noticed that you are using minikube docker-env:": "", + "Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "", + "Number of CPUs allocated to Kubernetes.": "", "Number of CPUs allocated to the minikube VM": "Liczba procesorów przypisana do maszyny wirtualnej minikube", "Number of CPUs allocated to the minikube VM.": "Liczba procesorów przypisana do maszyny wirtualnej minikube", "Number of lines back to go within the log": "", @@ -312,6 +306,7 @@ "Please install the minikube hyperkit VM driver, or select an alternative --driver": "", "Please install the minikube kvm2 VM driver, or select an alternative --driver": "", "Please make sure the service you are looking for is deployed or is in the correct namespace.": "Proszę upewnij się, że serwis którego szukasz znajduje się w prawidłowej przestrzeni nazw", + "Please re-eval your 
docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "", "Please upgrade the '{{.driver_executable}}'. {{.documentation_url}}": "Proszę zaktualizować '{{.driver_executable}}'. {{.documentation_url}}", "Populates the specified folder with documentation in markdown about minikube": "", @@ -325,11 +320,11 @@ "Profile gets or sets the current minikube profile": "Pobiera lub ustawia aktywny profil minikube", "Profile name \"{{.profilename}}\" is minikube keyword. To delete profile use command minikube delete -p \u003cprofile name\u003e": "", "Provide VM UUID to restore MAC address (hyperkit driver only)": "", + "Pulling base image ...": "", "Reboot to complete VirtualBox installation, and verify that VirtualBox is not blocked by your system": "Uruchom ponownie komputer aby zakończyć instalacje VirtualBox'a i upewnij się że nie jest on blokowany przez twój system", "Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "", "Rebuild libvirt with virt-network support": "", "Received {{.name}} signal": "", - "Reconfiguring existing host ...": "", "Registry mirrors to pass to the Docker daemon": "", "Reinstall VirtualBox and reboot. 
Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/": "", "Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "", @@ -338,8 +333,9 @@ "Removing {{.directory}} ...": "", "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "", "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "", - "Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "", - "Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "", + "Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. Kubernetes may crash unexpectedly.": "", + "Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "", + "Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "", "Retrieve the ssh identity key path of the specified cluster": "Pozyskuje ścieżkę do klucza ssh dla wyspecyfikowanego klastra", "Retrieve the ssh identity key path of the specified cluster.": "Pozyskuje ścieżkę do klucza ssh dla wyspecyfikowanego klastra.", "Retrieves the IP address of the running cluster": "Pobiera adres IP aktualnie uruchomionego klastra", @@ -352,8 +348,10 @@ "Run minikube from the C: drive.": "", "Run the kubernetes client, download it if necessary. 
Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "", "Run the minikube command as an Administrator": "", + "Run: \"{{.delete}}\", then \"{{.start}} --alsologtostderr -v=1\" to try again with more logging": "", "Run: 'chmod 600 $HOME/.kube/config'": "", "Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", + "Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. Or list out all the services using 'minikube service list'": "", "Set failed": "", "Set flag to delete all profiles": "", "Set this flag to delete the '.minikube' folder from your user directory.": "", @@ -379,8 +377,8 @@ "Specify the 9p version that the mount should use": "", "Specify the ip that the mount should be setup on": "", "Specify the mount filesystem type (supported types: 9p)": "", - "Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "", - "Starting node": "", + "StartHost failed again: {{.error}}": "", + "StartHost failed, but will try again: {{.error}}": "", "Starting tunnel for service {{.service}}.": "", "Starts a local kubernetes cluster": "Uruchamianie lokalnego klastra kubernetesa", "Starts a node.": "", @@ -393,7 +391,6 @@ "Successfully added {{.name}} to {{.cluster}}!": "", "Successfully deleted all profiles": "", "Successfully mounted {{.sourcePath}} to {{.destinationPath}}": "Pomyślnie zamontowano {{.sourcePath}} do {{.destinationPath}}", - "Successfully powered off Hyper-V. 
minikube driver -- {{.driver}}": "", "Successfully purged minikube directory located at - [{{.minikubeDirectory}}]": "", "Suggestion: {{.advice}}": "Sugestia: {{.advice}}", "Suggestion: {{.fix}}": "", @@ -423,12 +420,16 @@ "The cluster dns domain name used in the kubernetes cluster": "Domena dns clastra użyta przez kubernetesa", "The container runtime to be used (docker, crio, containerd)": "Runtime konteneryzacji (docker, crio, containerd).", "The container runtime to be used (docker, crio, containerd).": "", + "The control plane for \"{{.name}}\" is paused!": "", + "The control plane node \"{{.name}}\" does not exist.": "", + "The control plane node is not running (state={{.state}})": "", + "The control plane node must be running for this command": "", "The cri socket path to be used.": "", "The docker service is currently not active": "Serwis docker jest nieaktywny", - "The docker service within '{{.profile}}' is not active": "", + "The docker service within '{{.name}}' is not active": "", + "The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "", "The driver '{{.driver}}' is not supported on {{.os}}": "Sterownik '{{.driver}} jest niewspierany przez system {{.os}}", - "The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "", - "The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the \"{{.driver}}\" driver.": "", + "The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "", "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "", "The hypervisor does not appear to be configured properly. 
Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "", "The initial time interval for each check that wait performs in seconds": "", @@ -440,10 +441,14 @@ "The name of the node to delete": "", "The name of the node to start": "", "The node to get logs from. Defaults to the primary control plane.": "", + "The node to ssh into. Defaults to the primary control plane.": "", + "The none driver is not compatible with multi-node clusters.": "", + "The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "", "The number of bytes to use for 9p packet payload": "", + "The number of nodes to spin up. Defaults to 1.": "", "The output format. One of 'json', 'table'": "", "The path on the file system where the docs in markdown need to be saved": "", - "The podman service within '{{.profile}}' is not active": "", + "The podman service within '{{.cluster}}' is not active": "", "The service namespace": "", "The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "", "The services namespace": "", @@ -452,17 +457,21 @@ "The value passed to --format is invalid: {{.error}}": "Wartość przekazana do --format jest nieprawidłowa: {{.error}}", "The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "", "The {{.driver_name}} driver should not be used with root privileges.": "{{.driver_name}} nie powinien byc używany z przywilejami root'a.", + "There is no local cluster named \"{{.cluster}}\"": "", "These changes will take effect upon a minikube delete and then a minikube start": "", "This addon does not have an endpoint defined for the 'addons open' command.\nYou can 
add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "", "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "", + "This control plane is not running! (state={{.state}})": "", + "This is unusual - you may want to investigate using \"{{.command}}\"": "", "This will keep the existing kubectl context and will create a minikube context.": "", "This will start the mount daemon and automatically mount files into minikube.": "", - "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "", + "Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "", "To connect to this cluster, use: kubectl --context={{.name}}": "Aby połączyć się z klastrem użyj: kubectl --context={{.name}}", "To connect to this cluster, use: kubectl --context={{.profile_name}}": "Aby połaczyć się z klastem uzyj: kubectl --context={{.profile_name}}", "To disable this notice, run: 'minikube config set WantUpdateNotification false'": "Aby wyłączyć te notyfikację, użyj: 'minikube config set WantUpdateNotification false'", "To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "", - "To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "", + "To fix this, run: {{.command}}": "", + "To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "", "To see addons list for other profiles use: `minikube addons -p name list`": "", "To start minikube with HyperV Powershell must be in your PATH`": "Aby uruchomić minikube z HyperV Powershell musi znajdować się w zmiennej PATH", "To use kubectl or minikube commands as your own user, you may need to relocate them. 
For example, to overwrite your own settings, run:": "", @@ -472,23 +481,30 @@ "Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "Unable to enable dashboard": "", "Unable to fetch latest version info": "", + "Unable to find control plane": "", "Unable to generate docs": "", "Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", "Unable to get VM IP address": "", "Unable to get addon status for {{.name}}: {{.error}}": "", + "Unable to get command runner": "", + "Unable to get control plane status: {{.error}}": "", "Unable to get current user": "", + "Unable to get driver IP": "", + "Unable to get machine status": "", "Unable to get runtime": "", - "Unable to get the status of the {{.name}} cluster.": "", "Unable to kill mount process: {{.error}}": "", "Unable to load cached images from config file.": "", "Unable to load cached images: {{.error}}": "", "Unable to load config: {{.error}}": "", + "Unable to load host": "", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "", "Unable to parse default Kubernetes version from constants: {{.error}}": "", + "Unable to parse memory '{{.memory}}': {{.error}}": "", "Unable to parse oldest Kubernetes version from constants: {{.error}}": "", - "Unable to remove machine directory: %v": "", + "Unable to remove machine directory": "", + "Unable to restart cluster, will reset it: {{.error}}": "", "Unable to start VM": "Nie można uruchomić maszyny wirtualnej", - "Unable to start VM. Please investigate and run 'minikube delete' if possible": "", + "Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "", "Unable to stop VM": "Nie można zatrzymać maszyny wirtualnej", "Unable to update {{.driver}} driver: {{.error}}": "", "Unable to verify SSH connectivity: {{.error}}. 
Will retry...": "",
@@ -499,6 +515,8 @@
     "Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "",
     "Unset variables instead of setting them": "",
     "Update server returned an empty list": "",
+    "Updating node": "",
+    "Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "",
     "Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "",
     "Usage": "",
     "Usage: minikube completion SHELL": "",
@@ -518,11 +536,11 @@
     "Userspace file server:": "",
     "Using image repository {{.name}}": "",
     "Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "",
-    "Using the running {{.driver_name}} \"{{.profile_name}}\" VM ...": "",
     "Using the {{.driver}} driver based on existing profile": "",
     "Using the {{.driver}} driver based on user configuration": "",
     "VM driver is one of: %v": "Sterownik wirtualnej maszyny to jeden z: %v",
     "VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "",
+    "Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "",
     "Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "Weryfikuję, czy zmienne HTTP_PROXY i HTTPS_PROXY są ustawione poprawnie",
     "Verify the IP address of the running cluster in kubeconfig.": "Weryfikuję adres IP działającego klastra w kubeconfig",
     "Verifying dashboard health ...": "Weryfikuję status dashboardu",
@@ -538,12 +556,13 @@
     "Wait failed": "",
     "Wait failed: {{.error}}": "",
     "Waiting for SSH access ...": "Oczekiwanie na połączenie SSH...",
-    "Waiting for cluster to come online ...": "",
     "Waiting for:": "Oczekiwanie na:",
     "Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "",
     "Whether to use external switch over Default Switch if virtual switch not explicitly specified. 
(hyperv driver only)": "", "You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "", + "You can also use 'minikube kubectl -- get pods' to invoke a matching version": "", "You can delete them using the following command(s):": "", + "You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "", "You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "", "You may need to stop the Hyper-V Manager and run `minikube delete` again.": "", "You must specify a service name": "Musisz podać nazwę serwisu", @@ -552,45 +571,43 @@ "Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "", "Your minikube config refers to an unsupported driver. Erase ~/.minikube, and try again.": "", "Your minikube vm is not running, try minikube start.": "", + "adding node": "", "addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "", "addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "", "addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "", - "api load": "", "bash completion failed": "", "call with cleanup=true to remove old tunnels": "", - "command runner": "", "config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "", "config view failed": "", - "creating api client": "", "dashboard service is not running: {{.error}}": "", + "deleting node": "", "disable failed": "", "dry-run mode. 
Validates configuration, but does not mutate system state": "",
     "dry-run validation complete!": "",
     "enable failed": "",
     "error creating clientset": "",
-    "error creating machine client": "",
     "error getting primary control plane": "",
     "error getting ssh port": "",
     "error parsing the input ip address for mount": "",
     "error starting tunnel": "",
     "error stopping tunnel": "",
     "failed to open browser: {{.error}}": "Nie udało się otworzyć przeglądarki: {{.error}}",
-    "getting config": "",
-    "getting primary control plane": "",
+    "generating join token": "",
     "if true, will embed the certs in kubeconfig.": "",
     "if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "",
+    "initialization failed, will try again: {{.error}}": "",
+    "joining cluster": "",
     "kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p\u003cport\u003e to find the process and kill it": "",
     "kubectl and minikube configuration will be stored in {{.home_folder}}": "konfiguracja minikube i kubectl będzie przechowywana w katalogu {{.home_folder}}",
     "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "kubectl nie zostało odnalezione w zmiennej środowiskowej ${PATH}. Instrukcja instalacji: https://kubernetes.io/docs/tasks/tools/install-kubectl/",
     "kubectl proxy": "",
-    "loading config": "",
+    "libmachine failed": "",
     "logdir set failed": "",
-    "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.": "",
     "max time to wait per Kubernetes core services to be healthy.": "",
     "minikube addons list --output OUTPUT. json, list": "",
     "minikube is exiting due to an error. If the above message is not useful, open an issue:": "",
     "minikube is unable to access the Google Container Registry. 
You may need to configure it to use a HTTP proxy.": "", - "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "", + "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "", "minikube profile was successfully set to {{.profile_name}}": "", "minikube status --output OUTPUT. json, text": "", "minikube {{.version}} is available! Download it: {{.url}}": "minikube {{.version}} jest dostępne! Pobierz je z: {{.url}}", @@ -599,14 +616,16 @@ "mount failed": "Montowanie się nie powiodło", "namespaces to pause": "", "namespaces to unpause": "", + "none driver does not support multi-node clusters": "", "not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "", "pause containers": "", "profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. 
You can return to the default minikube profile by running `minikube profile default`": "", - "profile {{.name}} is not running.": "", "reload cached images.": "", "reloads images previously added using the 'cache add' subcommand": "", "retrieving node": "", + "saving node": "", "service {{.namespace_name}}/{{.service_name}} has no node port": "", + "setting up certs": "", "stat failed": "", "status json failure": "", "status text failure": "", @@ -633,16 +652,17 @@ "usage: minikube profile [MINIKUBE_PROFILE_NAME]": "", "zsh completion failed": "", "{{.addonName}} was successfully enabled": "{{.addonName}} został aktywowany pomyślnie", + "{{.cluster}} IP has been updated to point at {{.ip}}": "", + "{{.cluster}} IP was already correctly configured for {{.ip}}": "", + "{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "", "{{.driver}} does not appear to be installed": "", "{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}": "", "{{.extra_option_component_name}}.{{.key}}={{.value}}": "", - "{{.machine}} IP has been updated to point at {{.ip}}": "", - "{{.machine}} IP was already correctly configured for {{.ip}}": "", "{{.name}} cluster does not exist": "Klaster {{.name}} nie istnieje", "{{.name}} has no available configuration options": "{{.name}} nie posiada opcji configuracji", "{{.name}} is already running": "", "{{.name}} was successfully configured": "{{.name}} skonfigurowano pomyślnie", - "{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "", + "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "", "{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.prefix}}minikube {{.version}} na {{.platform}}", "{{.type}} is not yet a supported filesystem. 
We will try anyways!": "{{.type}} nie jest wspierany przez system plików. I tak spróbujemy!", "{{.url}} is not accessible: {{.error}}": "" diff --git a/translations/zh-CN.json b/translations/zh-CN.json index 1973fd0f72..10d574dc6e 100644 --- a/translations/zh-CN.json +++ b/translations/zh-CN.json @@ -1,12 +1,11 @@ { "\"The '{{.minikube_addon}}' addon is disabled": "", + "\"{{.machineName}}\" does not exist, nothing to stop": "", "\"{{.minikube_addon}}\" was successfully disabled": "已成功禁用 \"{{.minikube_addon}}\"", "\"{{.name}}\" cluster does not exist. Proceeding ahead with cleanup.": "\"{{.name}}\" 集群不存在,将继续清理", "\"{{.name}}\" profile does not exist": "“{{.name}}”配置文件不存在", "\"{{.name}}\" profile does not exist, trying anyways.": "", - "\"{{.node_name}}\" stopped.": "", "\"{{.profile_name}}\" VM does not exist, nothing to stop": "\"{{.profile_name}}\" 虚拟机不存在,没有什么可供停止的", - "\"{{.profile_name}}\" does not exist, nothing to stop": "", "\"{{.profile_name}}\" host does not exist, unable to show an IP": "\"{{.profile_name}}\" 主机不存在,无法显示其IP", "\"{{.profile_name}}\" stopped.": "\"{{.profile_name}}\" 已停止", "'none' driver does not support 'minikube docker-env' command": "'none' 驱动不支持 'minikube docker-env' 命令", @@ -14,7 +13,6 @@ "'none' driver does not support 'minikube podman-env' command": "", "'none' driver does not support 'minikube ssh' command": "'none' 驱动不支持 'minikube ssh' 命令", "'{{.driver}}' driver reported an issue: {{.error}}": "'{{.driver}}' 驱动程序报告了一个问题: {{.error}}", - "'{{.profile}}' is not running": "", "- {{.profile}}": "", "A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "VPN 或者防火墙正在干扰对 minikube 虚拟机的 HTTP 访问。或者,您可以使用其它的虚拟机驱动:https://minikube.sigs.k8s.io/docs/start/", "A firewall is blocking Docker the minikube VM from reaching the internet. 
You may need to configure it to use a proxy.": "防火墙正在阻止 minikube 虚拟机中的 Docker 访问互联网。您可能需要对其进行配置为使用代理", @@ -37,12 +35,12 @@ "Adds a node to the given cluster config, and starts it.": "", "Adds a node to the given cluster.": "", "Advanced Commands:": "高级命令:", - "After minikube restart the dockerd ports might have changed. To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Aliases": "别名", "Allow user prompts for more information": "允许用户提示以获取更多信息", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "用于从中拉取 docker 镜像的备选镜像存储库。如果您对 gcr.io 的访问受到限制,则可以使用该镜像存储库。将镜像存储库设置为“auto”可让 minikube 为您选择一个存储库。对于中国大陆用户,您可以使用本地 gcr.io 镜像,例如 registry.cn-hangzhou.aliyuncs.com/google_containers", "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)": "为 minikube 虚拟机分配的 RAM 容量(格式:\u003c数字\u003e[\u003c单位\u003e],其中单位 = b、k、m 或 g)", "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "为 minikube 虚拟机分配的 RAM 容量(格式:\u003c数字\u003e[\u003c单位\u003e],其中单位 = b、k、m 或 g)。", + "Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", "Amount of time to wait for a service in seconds": "等待服务的时间(单位秒)", "Amount of time to wait for service in seconds": "等待服务的时间(单位秒)", "Another hypervisor, such as VirtualBox, is conflicting with KVM. 
Please stop the other hypervisor, or use --driver to switch to it.": "", @@ -56,6 +54,7 @@ "Because you are using docker driver on Mac, the terminal needs to be open to run it.": "", "Bind Address: {{.Address}}": "", "Block until the apiserver is servicing API requests": "阻塞直到 apiserver 为 API 请求提供服务", + "Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "", "Cannot find directory {{.path}} for mount": "找不到用来挂载的 {{.path}} 目录", "Cannot use both --output and --format options": "不能同时使用 --output 和 --format 选项", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "检查 'journalctl -xeu kubelet' 的输出,尝试启动 minikube 时添加参数 --extra-config=kubelet.cgroup-driver=systemd", @@ -80,11 +79,12 @@ "Could not process errors from failed deletion": "无法处理删除失败的错误", "Country code of the image mirror to be used. Leave empty to use the global one. 
For Chinese mainland users, set it to cn.": "需要使用的镜像源的国家/地区代码。留空以使用全球代码。对于中国大陆用户,请将其设置为 cn。", "Created a new profile : {{.profile_name}}": "创建了新的配置文件:{{.profile_name}}", - "Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", "Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "正在 {{.driver_name}} 容器中 创建 Kubernetes,(CPUs={{.number_of_cpus}}), 内存={{.memory_size}}MB ({{.host_memory_size}}MB 可用)", + "Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", "Creating a new profile failed": "创建新的配置文件失败", "Creating mount {{.name}} ...": "正在创建装载 {{.name}}…", "Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "正在创建 {{.driver_name}} 虚拟机(CPUs={{.number_of_cpus}},Memory={{.memory_size}}MB, Disk={{.disk_size}}MB)...", + "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", "DEPRECATED, use `driver` instead.": "", "Default group id used for the mount": "用于挂载默认的 group id", "Default user id used for the mount": "用于挂载默认的 user id", @@ -112,9 +112,9 @@ "Done! kubectl is now configured to use \"{{.name}}\"": "完成!kubectl 已经配置至 \"{{.name}}\"", "Done! 
kubectl is now configured to use {{.name}}": "完成!kubectl已经配置至{{.name}}", "Download complete!": "下载完成!", + "Downloading Kubernetes {{.version}} preload ...": "", "Downloading VM boot image ...": "正在下载 VM boot image...", "Downloading driver {{.driver}}:": "正在下载驱动 {{.driver}}:", - "Downloading preloaded images tarball for k8s {{.version}} ...": "", "Downloading {{.name}} {{.version}}": "正在下载 {{.name}} {{.version}}", "ERROR creating `registry-creds-acr` secret": "", "ERROR creating `registry-creds-dpr` secret": "创建 `registry-creds-dpr` secret 时出错", @@ -162,21 +162,17 @@ "Error getting cluster bootstrapper": "获取 cluster bootstrapper 时出错", "Error getting cluster config": "", "Error getting config": "获取 config 时出错", - "Error getting control plane": "", "Error getting host": "获取 host 时出错", - "Error getting host IP": "", "Error getting host status": "获取 host status 时出错", "Error getting machine logs": "获取 machine logs 时出错", "Error getting machine status": "获取 machine status 时出错", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "", "Error getting primary control plane": "", - "Error getting primary cp": "", "Error getting profiles to delete": "获取待删除配置文件时出错", "Error getting service status": "获取 service status 时出错", "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "使用 namespace: {{.namespace}} 和 labels {{.labelName}}:{{.addonName}} 获取 service 时出错:{{.error}}", "Error getting ssh client": "", "Error getting the host IP address to use from within the VM": "从虚拟机中获取 host IP 地址时出错", - "Error host driver ip status": "", "Error killing mount process": "杀死 mount 进程时出错", "Error loading api": "加载 api 时出错", "Error loading profile config": "加载配置文件的配置时出错", @@ -187,11 +183,9 @@ "Error parsing minikube version: {{.error}}": "解析 minikube 版本时出错:{{.error}}", "Error reading {{.path}}: {{.error}}": "读取 {{.path}} 时出错:{{.error}}", "Error restarting cluster": "重启 cluster 时出错", - "Error retrieving node": "", "Error 
setting shell variables": "设置 shell 变量时出错", "Error starting cluster": "开启 cluster 时出错", "Error starting mount": "开启 mount 时出错", - "Error starting node": "", "Error unsetting shell variables": "取消设置 shell 变量时出错", "Error while setting kubectl current context : {{.error}}": "设置 kubectl 上下文时出错 :{{.error}}", "Error writing mount pid": "写入 mount pid 时出错", @@ -199,6 +193,7 @@ "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "错误:您已选择 Kubernetes v{{.new}},但您的配置文件的现有集群正在运行 Kubernetes v{{.old}}。非破坏性降级不受支持,但若要继续操作,您可以执行以下选项之一:\n* 使用 Kubernetes v{{.new}} 重新创建现有集群:运行“minikube delete {{.profile}}”,然后运行“minikube start {{.profile}} --kubernetes-version={{.new}}”\n* 使用 Kubernetes v{{.new}} 再创建一个集群:运行“minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}”\n* 通过 Kubernetes v{{.old}} 或更高版本重复使用现有集群:运行“minikube start {{.profile}} --kubernetes-version={{.old}}”", "Error: [{{.id}}] {{.error}}": "错误:[{{.id}}] {{.error}}", "Examples": "示例", + "Existing disk is missing new features ({{.error}}). 
To upgrade, run 'minikube delete'": "", "Exiting": "正在退出", "Exiting due to driver incompatibility": "由于驱动程序不兼容而退出", "Exiting.": "正在退出。", @@ -217,10 +212,10 @@ "Failed to delete cluster: {{.error}}__1": "未能删除集群:{{.error}}", "Failed to delete images": "删除镜像时失败", "Failed to delete images from config": "无法删除配置的镜像", - "Failed to delete node {{.name}}": "", "Failed to download kubectl": "下载 kubectl 失败", "Failed to enable container runtime": "", "Failed to generate config": "无法生成配置", + "Failed to get API Server URL": "", "Failed to get bootstrapper": "获取 bootstrapper 失败", "Failed to get command runner": "", "Failed to get driver URL": "获取 driver URL 失败", @@ -229,6 +224,7 @@ "Failed to get service URL: {{.error}}": "获取 service URL 失败:{{.error}}", "Failed to kill mount process: {{.error}}": "未能终止装载进程:{{.error}}", "Failed to list cached images": "无法列出缓存镜像", + "Failed to parse kubernetes version": "", "Failed to reload cached images": "重新加载缓存镜像失败", "Failed to remove profile": "无法删除配置文件", "Failed to save config": "无法保存配置", @@ -236,12 +232,12 @@ "Failed to set NO_PROXY Env. 
Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "未能设置 NO_PROXY 环境变量。请使用“export NO_PROXY=$NO_PROXY,{{.ip}}”。", "Failed to setup certs": "设置 certs 失败", "Failed to setup kubeconfig": "设置 kubeconfig 失败", - "Failed to start node {{.name}}": "", "Failed to stop node {{.name}}": "", "Failed to update cluster": "更新 cluster 失败", "Failed to update config": "更新 config 失败", "Failed unmount: {{.error}}": "unmount 失败:{{.error}}", "File permissions used for the mount": "用于 mount 的文件权限", + "Filter to use only VM Drivers": "", "Flags": "标志", "Follow": "跟踪", "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "为获得最佳结果,请安装 kubectl:https://kubernetes.io/docs/tasks/tools/install-kubectl/", @@ -251,13 +247,17 @@ "Force minikube to perform possibly dangerous operations": "强制 minikube 执行可能有风险的操作", "Found network options:": "找到的网络选项:", "Found {{.number}} invalid profile(s) !": "找到 {{.number}} 个无效的配置文件!", + "Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "", + "Generate unable to parse memory '{{.memory}}': {{.error}}": "", "Gets the kubernetes URL(s) for the specified service in your local cluster": "获取本地集群中指定服务的 kubernetes URL", "Gets the kubernetes URL(s) for the specified service in your local cluster. 
In the case of multiple URLs they will be printed one at a time.": "获取本地集群中指定服务的 kubernetes URL。如果有多个 URL,它们将一次打印一个", "Gets the logs of the running instance, used for debugging minikube, not user code.": "获取正在运行的实例日志,用于调试 minikube,不是用户代码", "Gets the status of a local kubernetes cluster": "获取本地 kubernetes 集群状态", "Gets the status of a local kubernetes cluster.\n\tExit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left.\n\tEg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK)": "", "Gets the value of PROPERTY_NAME from the minikube config file": "", + "Getting bootstrapper": "", "Getting machine config failed": "获取机器配置失败", + "Getting primary control plane": "", "Global Flags": "", "Go template format string for the cache list output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate": "", "Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "", @@ -270,6 +270,7 @@ "Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --vm-driver": "Hyperkit 的网络挂了。升级到最新的 hyperkit 版本以及/或者 Docker 桌面版。或者,你可以通过 --vm-driver 切换其他选项", "If set, automatically updates drivers to the latest version. 
Defaults to true.": "如果设置了,将自动更新驱动到最新版本。默认为 true。", + "If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "", "If set, install addons. Defaults to true.": "", "If set, pause all namespaces": "", "If set, unpause all namespaces": "", @@ -286,12 +287,12 @@ "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.": "传递给 Docker 守护进程的不安全 Docker 注册表。系统会自动添加默认服务 CIDR 范围。", "Install VirtualBox, or select an alternative value for --driver": "", "Install the latest hyperkit binary, and run 'minikube delete'": "", - "Invalid size passed in argument: {{.error}}": "", "IsEnabled failed": "", + "Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "", + "Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "", "Kill the mount process spawned by minikube start": "", "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}": "Kubernetes {{.new}} 现在可用了。如果您想升级,请指定 --kubernetes-version={{.new}}", "Kubernetes {{.version}} is not supported by this release of minikube": "当前版本的 minukube 不支持 Kubernetes {{.version}}", - "Launching Kubernetes ...": "", "Launching Kubernetes ... ": "正在启动 Kubernetes ... ", "Launching proxy ...": "", "List all available images from the local cache.": "", @@ -303,7 +304,7 @@ "Local folders to share with Guest via NFS mounts (hyperkit driver only)": "通过 NFS 装载与访客共享的本地文件夹(仅限 hyperkit 驱动程序)", "Location of the VPNKit socket used for networking. 
If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "用于网络连接的 VPNKit 套接字的位置。如果为空,则停用 Hyperkit VPNKitSock;如果为“auto”,则将 Docker 用于 Mac VPNKit 连接;否则使用指定的 VSock(仅限 hyperkit 驱动程序)", "Location of the minikube iso": "minikube iso 的位置", - "Location of the minikube iso.": "", + "Locations to fetch the minikube ISO from.": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "", "Message Size: {{.size}}": "", @@ -321,15 +322,18 @@ "NOTE: This process must stay alive for the mount to be accessible ...": "", "Networking and Connectivity Commands:": "网络和连接命令:", "No minikube profile was found. You can create one using `minikube start`.": "", + "Node \"{{.node_name}}\" stopped.": "", "Node may be unable to resolve external DNS records": "", "Node operations": "", "Node {{.name}} was successfully deleted.": "", + "Node {{.nodeName}} does not exist.": "", + "Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "", "None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "您所在位置的已知存储库都无法访问。正在将 {{.image_repository_name}} 用作后备存储库。", "None of the known repositories is accessible. 
Consider specifying an alternative image repository with --image-repository flag": "已知存储库都无法访问。请考虑使用 --image-repository 标志指定备选镜像存储库", "Not passing {{.name}}={{.value}} to docker env.": "", - "Noticed that you are using minikube docker-env:": "", + "Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "", + "Number of CPUs allocated to Kubernetes.": "", "Number of CPUs allocated to the minikube VM": "分配给 minikube 虚拟机的 CPU 的数量", - "Number of CPUs allocated to the minikube VM.": "", "Number of lines back to go within the log": "", "OS release is {{.pretty_name}}": "", "Open the addons URL with https instead of http": "", @@ -350,6 +354,7 @@ "Please install the minikube hyperkit VM driver, or select an alternative --driver": "", "Please install the minikube kvm2 VM driver, or select an alternative --driver": "", "Please make sure the service you are looking for is deployed or is in the correct namespace.": "", + "Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "", "Please upgrade the '{{.driver_executable}}'. {{.documentation_url}}": "请升级“{{.driver_executable}}”。{{.documentation_url}}", "Populates the specified folder with documentation in markdown about minikube": "", @@ -363,6 +368,7 @@ "Profile gets or sets the current minikube profile": "获取或设置当前的 minikube 配置文件", "Profile name \"{{.profilename}}\" is minikube keyword. 
To delete profile use command minikube delete -p \u003cprofile name\u003e": "配置文件名称 \"{{.profilename}}\" 是 minikube 的一个关键字。使用 minikube delete -p \u003cprofile name\u003e 命令 删除配置文件", "Provide VM UUID to restore MAC address (hyperkit driver only)": "提供虚拟机 UUID 以恢复 MAC 地址(仅限 hyperkit 驱动程序)", + "Pulling base image ...": "", "Pulling images ...": "拉取镜像 ...", "Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "重启以完成 VirtualBox 安装,检查 VirtualBox 未被您的操作系统禁用,或者使用其他的管理程序。", "Rebuild libvirt with virt-network support": "", @@ -378,7 +384,10 @@ "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "请求的 CPU 数量 {{.requested_cpus}} 小于允许的最小值 {{.minimum_cpus}}", "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "请求的磁盘大小 {{.requested_size}} 小于最小值 {{.minimum_size}}", "Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "请求的内存分配 ({{.memory}}MB) 小于默认内存分配 {{.default_memorysize}}MB。请注意 minikube 可能无法正常运行或可能会意外崩溃。", + "Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. 
Kubernetes may crash unexpectedly.": "", "Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "请求的内存分配 {{.requested_size}} 小于允许的 {{.minimum_size}} 最小值", + "Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "", + "Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "", "Retrieve the ssh identity key path of the specified cluster": "检索指定集群的 ssh 密钥路径", "Retrieve the ssh identity key path of the specified cluster.": "检索指定集群的 ssh 密钥路径。", "Retrieves the IP address of the running cluster": "检索正在运行的群集的 IP 地址", @@ -391,10 +400,12 @@ "Run minikube from the C: drive.": "", "Run the kubernetes client, download it if necessary. Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "", "Run the minikube command as an Administrator": "", + "Run: \"{{.delete}}\", then \"{{.start}} --alsologtostderr -v=1\" to try again with more logging": "", "Run: 'chmod 600 $HOME/.kube/config'": "执行 'chmod 600 $HOME/.kube/config'", "Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", "Selecting '{{.driver}}' driver from existing profile (alternates: {{.alternates}})": "从现有配置文件中选择 '{{.driver}}' 驱动程序 (可选:{{.alternates}})", "Selecting '{{.driver}}' driver from user configuration (alternates: {{.alternates}})": "从用户配置中选择 {{.driver}}' 驱动程序(可选:{{.alternates}})", + "Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. 
Or list out all the services using 'minikube service list'": "", "Set failed": "", "Set flag to delete all profiles": "设置标志以删除所有配置文件", "Set this flag to delete the '.minikube' folder from your user directory.": "设置这个标志来删除您用户目录下的 '.minikube' 文件夹。", @@ -420,8 +431,8 @@ "Specify the 9p version that the mount should use": "", "Specify the ip that the mount should be setup on": "", "Specify the mount filesystem type (supported types: 9p)": "", - "Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "", - "Starting node": "", + "StartHost failed again: {{.error}}": "", + "StartHost failed, but will try again: {{.error}}": "", "Starting tunnel for service {{.service}}.": "", "Starts a local kubernetes cluster": "启动本地 kubernetes 集群", "Starts a node.": "", @@ -469,12 +480,16 @@ "The cluster dns domain name used in the kubernetes cluster": "kubernetes 集群中使用的集群 dns 域名", "The container runtime to be used (docker, crio, containerd)": "需要使用的容器运行时(docker、crio、containerd)", "The container runtime to be used (docker, crio, containerd).": "", + "The control plane for \"{{.name}}\" is paused!": "", + "The control plane node \"{{.name}}\" does not exist.": "", + "The control plane node is not running (state={{.state}})": "", + "The control plane node must be running for this command": "", "The cri socket path to be used": "需要使用的 cri 套接字路径", "The cri socket path to be used.": "", - "The docker service within '{{.profile}}' is not active": "", + "The docker service within '{{.name}}' is not active": "", + "The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "", "The driver '{{.driver}}' is not supported on {{.os}}": "{{.os}} 不支持驱动程序“{{.driver}}”", - "The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "", - "The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the \"{{.driver}}\" driver.": "", + 
"The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "", "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "hyperv 虚拟交换机名称。默认为找到的第一个 hyperv 虚拟交换机。(仅限 hyperv 驱动程序)", "The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "管理程序似乎配置的不正确。执行 'minikube start --alsologtostderr -v=1' 并且检查错误代码", "The initial time interval for each check that wait performs in seconds": "", @@ -486,10 +501,14 @@ "The name of the node to delete": "", "The name of the node to start": "", "The node to get logs from. Defaults to the primary control plane.": "", + "The node to ssh into. Defaults to the primary control plane.": "", + "The none driver is not compatible with multi-node clusters.": "", + "The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "", "The number of bytes to use for 9p packet payload": "", + "The number of nodes to spin up. Defaults to 1.": "", "The output format. 
One of 'json', 'table'": "输出的格式。'json' 或者 'table'", "The path on the file system where the docs in markdown need to be saved": "", - "The podman service within '{{.profile}}' is not active": "", + "The podman service within '{{.cluster}}' is not active": "", "The service namespace": "", "The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "", "The services namespace": "", @@ -498,19 +517,24 @@ "The value passed to --format is invalid: {{.error}}": "", "The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "", "The {{.driver_name}} driver should not be used with root privileges.": "不应以根权限使用 {{.driver_name}} 驱动程序。", + "There is no local cluster named \"{{.cluster}}\"": "", "There's a new version for '{{.driver_executable}}'. Please consider upgrading. {{.documentation_url}}": "“{{.driver_executable}}”有一个新版本。请考虑升级。{{.documentation_url}}", "These changes will take effect upon a minikube delete and then a minikube start": "", "This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "", "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "此操作还可通过设置环境变量 CHANGE_MINIKUBE_NONE_USER=true 自动完成", + "This control plane is not running! 
(state={{.state}})": "", + "This is unusual - you may want to investigate using \"{{.command}}\"": "", "This will keep the existing kubectl context and will create a minikube context.": "这将保留现有 kubectl 上下文并创建 minikube 上下文。", "This will start the mount daemon and automatically mount files into minikube": "这将启动装载守护进程并将文件自动装载到 minikube 中", "This will start the mount daemon and automatically mount files into minikube.": "", + "Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "", "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "提示:要移除这个由根用户拥有的集群,请运行 sudo {{.cmd}} delete", "To connect to this cluster, use: kubectl --context={{.name}}": "如需连接到此集群,请使用 kubectl --context={{.name}}", "To connect to this cluster, use: kubectl --context={{.name}}__1": "如需连接到此集群,请使用 kubectl --context={{.name}}", "To connect to this cluster, use: kubectl --context={{.profile_name}}": "", "To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "", - "To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "", + "To fix this, run: {{.command}}": "", + "To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "", "To see addons list for other profiles use: `minikube addons -p name list`": "", "To start minikube with HyperV Powershell must be in your PATH`": "", "To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "如需以您自己的用户身份使用 kubectl 或 minikube 命令,您可能需要重新定位该命令。例如,如需覆盖您的自定义设置,请运行:", @@ -521,23 +545,32 @@ "Unable to determine a default driver to use. 
Try specifying --vm-driver, or see https://minikube.sigs.k8s.io/docs/start/": "无法确定要使用的默认驱动。尝试通过 --vm-dirver 指定,或者查阅 https://minikube.sigs.k8s.io/docs/start/", "Unable to enable dashboard": "", "Unable to fetch latest version info": "", + "Unable to find control plane": "", "Unable to generate docs": "", "Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", "Unable to get VM IP address": "", "Unable to get addon status for {{.name}}: {{.error}}": "", "Unable to get bootstrapper: {{.error}}": "无法获取引导程序:{{.error}}", + "Unable to get command runner": "", + "Unable to get control plane status: {{.error}}": "", "Unable to get current user": "", + "Unable to get driver IP": "", + "Unable to get machine status": "", "Unable to get runtime": "", "Unable to get the status of the {{.name}} cluster.": "无法获取 {{.name}} 集群状态。", "Unable to kill mount process: {{.error}}": "", "Unable to load cached images from config file.": "无法从配置文件中加载缓存的镜像。", "Unable to load cached images: {{.error}}": "", "Unable to load config: {{.error}}": "无法加载配置:{{.error}}", + "Unable to load host": "", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "无法解析“{{.kubernetes_version}}”:{{.error}}", "Unable to parse default Kubernetes version from constants: {{.error}}": "无法从常量中解析默认的 Kubernetes 版本号: {{.error}}", + "Unable to parse memory '{{.memory}}': {{.error}}": "", "Unable to parse oldest Kubernetes version from constants: {{.error}}": "无法从常量中解析最旧的 Kubernetes 版本号: {{.error}}", "Unable to pull images, which may be OK: {{.error}}": "无法拉取镜像,有可能是正常状况:{{.error}}", - "Unable to remove machine directory: %v": "", + "Unable to remove machine directory": "", + "Unable to restart cluster, will reset it: {{.error}}": "", + "Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "", "Unable to start VM. 
Please investigate and run 'minikube delete' if possible": "无法启动虚拟机。可能的话请检查后执行 'minikube delete'", "Unable to stop VM": "无法停止虚拟机", "Unable to update {{.driver}} driver: {{.error}}": "", @@ -549,6 +582,8 @@ "Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "", "Unset variables instead of setting them": "", "Update server returned an empty list": "", + "Updating node": "", + "Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "", "Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "", "Upgrading from Kubernetes {{.old}} to {{.new}}": "正在从 Kubernetes {{.old}} 升级到 {{.new}}", "Usage": "使用方法", @@ -575,6 +610,7 @@ "VM driver is one of: %v": "虚拟机驱动程序是以下项之一:%v", "VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "虚拟机无权访问 {{.repository}},或许您需要配置代理或者设置 --image-repository", "VM may be unable to resolve external DNS records": "虚拟机可能无法解析外部 DNS 记录", + "Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "", "Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "验证是否正确设置了 HTTP_PROXY 和 HTTPS_PROXY 环境变量。", "Verify the IP address of the running cluster in kubeconfig.": "在 kubeconfig 中验证正在运行的集群 IP 地址。", "Verifying dashboard health ...": "正在验证 dashboard 运行情况 ...", @@ -597,7 +633,9 @@ "Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "NFS 共享的根目录位置,默认为 /nfsshares(仅限 hyperkit 驱动程序)", "Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)": "", "You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). 
Please see {{.documentation_url}} for more details": "您似乎正在使用代理,但您的 NO_PROXY 环境不包含 minikube IP ({{.ip_address}})。如需了解详情,请参阅 {{.documentation_url}}", + "You can also use 'minikube kubectl -- get pods' to invoke a matching version": "", "You can delete them using the following command(s):": "", + "You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "", "You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "您可能需要从管理程序中手动移除“{{.name}}”虚拟机", "You may need to stop the Hyper-V Manager and run `minikube delete` again.": "", "You must specify a service name": "", @@ -606,46 +644,44 @@ "Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "", "Your minikube config refers to an unsupported driver. Erase ~/.minikube, and try again.": "", "Your minikube vm is not running, try minikube start.": "", + "adding node": "", "addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "", "addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "", "addon enable failed": "启用插件失败", "addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "插件使用诸如 \"minikube addons enable dashboard\" 的子命令修改 minikube 的插件文件", - "api load": "", "bash completion failed": "", "call with cleanup=true to remove old tunnels": "", - "command runner": "", "config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "", "config view failed": "", - "creating api client": "", "dashboard service is not running: {{.error}}": "", + "deleting node": "", "disable failed": "禁用失败", "dry-run mode. 
Validates configuration, but does not mutate system state": "", "dry-run validation complete!": "", "enable failed": "开启失败", "error creating clientset": "", - "error creating machine client": "", "error getting primary control plane": "", "error getting ssh port": "", "error parsing the input ip address for mount": "", "error starting tunnel": "", "error stopping tunnel": "", "failed to open browser: {{.error}}": "", - "getting config": "", - "getting primary control plane": "", + "generating join token": "", "if true, will embed the certs in kubeconfig.": "", "if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "", + "initialization failed, will try again: {{.error}}": "", + "joining cluster": "", "kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p\u003cport\u003e to find the process and kill it": "kubeadm 检测到一个与其他进程的 TCP 端口冲突:或许是另外的本地安装的 Kubernetes 导致。执行 lsof -p\u003cport\u003e 查找并杀死这些进程", "kubectl and minikube configuration will be stored in {{.home_folder}}": "kubectl 和 minikube 配置将存储在 {{.home_folder}} 中", "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "", "kubectl proxy": "", - "loading config": "", + "libmachine failed": "", "logdir set failed": "", - "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.": "", "max time to wait per Kubernetes core services to be healthy.": "每个 Kubernetes 核心服务保持健康所需的最长时间。", "minikube addons list --output OUTPUT. json, list": "", "minikube is exiting due to an error. If the above message is not useful, open an issue:": "由于出错 minikube 正在退出。如果以上信息没有帮助,请提交问题反馈:", "minikube is unable to access the Google Container Registry. 
You may need to configure it to use a HTTP proxy.": "", - "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "", + "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "", "minikube is unable to connect to the VM: {{.error}}\n\nThis is likely due to one of two reasons:\n\n- VPN or firewall interference\n- {{.hypervisor}} network configuration issue\n\nSuggested workarounds:\n\n- Disable your local VPN or firewall software\n- Configure your local VPN or firewall to allow access to {{.ip}}\n- Restart or reinstall {{.hypervisor}}\n- Use an alternative --vm-driver": "minikube 无法连接到虚拟机:{{.error}}\n\n可能是由于以下两个原因之一导致:\n\n-VPN 或防火墙冲突\n- {{.hypervisor}} 网络配置问题\n建议的方案:\n\n- 禁用本地的 VPN 或者防火墙软件\n- 配置本地 VPN 或防火墙软件,放行 {{.ip}}\n- 重启或者重装 {{.hypervisor}}\n- 使用另外的 --vm-driver", "minikube profile was successfully set to {{.profile_name}}": "", "minikube status --output OUTPUT. 
json, text": "", @@ -655,14 +691,16 @@ "mount failed": "", "namespaces to pause": "", "namespaces to unpause": "", + "none driver does not support multi-node clusters": "", "not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "", "pause containers": "暂停容器", "profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`": "", - "profile {{.name}} is not running.": "", "reload cached images.": "重新加载缓存的镜像", "reloads images previously added using the 'cache add' subcommand": "重新加载之前通过子命令 'cache add' 添加的镜像", "retrieving node": "", + "saving node": "", "service {{.namespace_name}}/{{.service_name}} has no node port": "", + "setting up certs": "", "stat failed": "", "status json failure": "", "status text failure": "", @@ -687,16 +725,17 @@ "usage: minikube delete": "", "usage: minikube profile [MINIKUBE_PROFILE_NAME]": "", "zsh completion failed": "", + "{{.cluster}} IP has been updated to point at {{.ip}}": "", + "{{.cluster}} IP was already correctly configured for {{.ip}}": "", + "{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "", "{{.driver}} does not appear to be installed": "似乎并未安装 {{.driver}}", "{{.driver}} does not appear to be installed, but is specified by an existing profile. 
Please run 'minikube delete' or install {{.driver}}": "似乎并未安装 {{.driver}},但已被当前的配置文件指定。请执行 'minikube delete' 或者安装 {{.driver}}", "{{.extra_option_component_name}}.{{.key}}={{.value}}": "", - "{{.machine}} IP has been updated to point at {{.ip}}": "", - "{{.machine}} IP was already correctly configured for {{.ip}}": "", - "{{.name}} cluster does not exist": "", "{{.name}} has no available configuration options": "", "{{.name}} is already running": "", "{{.name}} was successfully configured": "", "{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "{{.path}} 的版本是 {{.client_version}},且与 Kubernetes {{.cluster_version}} 不兼容。您需要更新 {{.path}} 或者使用 'minikube kubectl' 连接到这个集群", + "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "", "{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.platform}} 上的 {{.prefix}}minikube {{.version}}", "{{.type}} is not yet a supported filesystem. 
We will try anyways!": "", "{{.url}} is not accessible: {{.error}}": "" From 798c007d999ed67921b361f27d36d1e1f192713f Mon Sep 17 00:00:00 2001 From: "re;i" Date: Fri, 27 Mar 2020 01:06:50 +0000 Subject: [PATCH 316/668] commit FAQ --- site/content/en/docs/FAQ/_index.md | 8 ++++++++ site/content/en/docs/FAQ/sudo_prompts.md | 20 ++++++++++++++++++++ 2 files changed, 28 insertions(+) create mode 100644 site/content/en/docs/FAQ/_index.md create mode 100644 site/content/en/docs/FAQ/sudo_prompts.md diff --git a/site/content/en/docs/FAQ/_index.md b/site/content/en/docs/FAQ/_index.md new file mode 100644 index 0000000000..4c22c42c78 --- /dev/null +++ b/site/content/en/docs/FAQ/_index.md @@ -0,0 +1,8 @@ +--- +title: "FAQ" +linkTitle: "FAQ" +weight: 5 +description: > + Questions that come up regularly +--- + diff --git a/site/content/en/docs/FAQ/sudo_prompts.md b/site/content/en/docs/FAQ/sudo_prompts.md new file mode 100644 index 0000000000..a78b9d141d --- /dev/null +++ b/site/content/en/docs/FAQ/sudo_prompts.md @@ -0,0 +1,20 @@ +--- +title: "Sudo prompts" +linkTitle: "Sudo prompts" +weight: 1 +date: 2020-03-26 +description: > + Disabling sudo prompts when using minikude start/stop/status, kubectl cluster-info, ... +--- + +## Use the `docker` driver + +Use the `docker` driver rather than the `none` driver. `docker` driver should be used unless it does not meet requirements for some reason. + +## For `none` users + +For `none` users, `CHANGE_MINIKUBE_NONE_USER=true`, kubectl and such will still work: [see environment variables](https://minikube.sigs.k8s.io/docs/reference/environment_variables/) + +## Otherwise deal with `sudo` + +Configure `sudo` to never prompt for the commands issued by minikube. 
From da578b8727ef257052e3b77ec19a182083d0fb91 Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Thu, 26 Mar 2020 21:00:45 -0500 Subject: [PATCH 317/668] change continue on error to false --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 4eb9b9706a..f94d59a89b 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -100,7 +100,7 @@ jobs: with: name: minikube_binaries - name: Run Integration Test - continue-on-error: true + continue-on-error: false # bash {0} to allow test to continue to next step. in case of shell: bash {0} run: | From d8cf3339abd9e71f0ce6e2b389327fcc97a85253 Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Thu, 26 Mar 2020 21:38:07 -0500 Subject: [PATCH 318/668] test old file --- .github/workflows/main.yml | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index f94d59a89b..f19cf8b641 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -211,21 +211,15 @@ jobs: echo $STAT | jq '.FailedTests' || true echo "-------------------------------------------------------" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi - docker_windows_shell: + docker_windows_NT: needs: [build_minikube] env: TIME_ELAPSED: time - JOB_NAME: "Docker_windows_shell" + JOB_NAME: "Docker_windows_NT" COMMIT_STATUS: "" runs-on: windows-latest steps: - uses: actions/checkout@v2 - - name: Docker Info - shell: bash - run: | - docker info || true - docker version || true - docker ps || true - name: Download gopogh run: | curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh.exe @@ -262,7 +256,7 @@ jobs: shell: bash - uses: actions/upload-artifact@v1 with: - name: docker_windows_shell + name: docker_windows_NT path: report - name: The End Result run: | @@ -271,8 +265,6 @@ jobs: echo "----------------${numFail} 
Failures----------------------------" echo $STAT | jq '.FailedTests' || true echo "--------------------------------------------" - numPass=$(echo $STAT | jq '.NumberOfPass') - echo "*** $numPass Passed ***" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi shell: bash none_ubuntu16_04: From 0bc5adee14bd5b45ef04b76ac78764719611b79b Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Thu, 26 Mar 2020 21:45:22 -0500 Subject: [PATCH 319/668] change to new file --- .github/workflows/main.yml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index f19cf8b641..dcdc6d15a3 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -211,11 +211,11 @@ jobs: echo $STAT | jq '.FailedTests' || true echo "-------------------------------------------------------" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi - docker_windows_NT: + docker_windows_shell: needs: [build_minikube] env: TIME_ELAPSED: time - JOB_NAME: "Docker_windows_NT" + JOB_NAME: "Docker_windows_shell" COMMIT_STATUS: "" runs-on: windows-latest steps: @@ -256,7 +256,7 @@ jobs: shell: bash - uses: actions/upload-artifact@v1 with: - name: docker_windows_NT + name: docker_windows_shell path: report - name: The End Result run: | @@ -265,6 +265,8 @@ jobs: echo "----------------${numFail} Failures----------------------------" echo $STAT | jq '.FailedTests' || true echo "--------------------------------------------" + numPass=$(echo $STAT | jq '.NumberOfPass') + echo "*** $numPass Passed ***" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi shell: bash none_ubuntu16_04: From b7bcda88180f632eb69050e5863cafa4e573ea69 Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Thu, 26 Mar 2020 21:58:39 -0500 Subject: [PATCH 320/668] delete expected-default-driver --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml 
b/.github/workflows/main.yml index dcdc6d15a3..6854bcb1f2 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -235,7 +235,7 @@ jobs: mkdir -p report mkdir -p testhome START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome minikube_binaries/e2e-windows-amd64.exe --expected-default-driver=hyperv -minikube-start-args=--vm-driver=hyperv -binary=minikube_binaries/minikube-windows-amd64.exe -test.v -test.timeout=65m 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome minikube_binaries/e2e-windows-amd64.exe -minikube-start-args=--vm-driver=hyperv -binary=minikube_binaries/minikube-windows-amd64.exe -test.v -test.timeout=65m 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) From 5466cbf4ac1b1ad09713e21d64f5a0340b67a51a Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Thu, 26 Mar 2020 22:10:17 -0500 Subject: [PATCH 321/668] rename the stage --- .github/workflows/main.yml | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 6854bcb1f2..cd3d6844fc 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -211,15 +211,21 @@ jobs: echo $STAT | jq '.FailedTests' || true echo "-------------------------------------------------------" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi - docker_windows_shell: + docker_on_windows: needs: [build_minikube] env: TIME_ELAPSED: time - JOB_NAME: "Docker_windows_shell" + JOB_NAME: "Docker_on_windows" COMMIT_STATUS: "" runs-on: windows-latest steps: - uses: actions/checkout@v2 + - name: Docker Info + shell: bash + run: | + docker info || true + docker version || true + docker ps || true - name: Download gopogh run: | curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh.exe @@ -256,7 +262,7 @@ jobs: shell: 
bash - uses: actions/upload-artifact@v1 with: - name: docker_windows_shell + name: docker_on_windows path: report - name: The End Result run: | @@ -493,15 +499,15 @@ jobs: run: | mkdir -p all_reports cp -r docker_ubuntu_18_04 ./all_reports/ - - name: download results docker_windows_shell + - name: download results docker_on_windows uses: actions/download-artifact@v1 with: - name: docker_windows_shell + name: docker_on_windows - name: cp to all_report shell: bash run: | mkdir -p all_reports - cp -r docker_windows_shell ./all_reports/ + cp -r docker_on_windows ./all_reports/ - name: Download Results none_ubuntu16_04 uses: actions/download-artifact@v1 with: From 3f74cb61ef6e2bc4f323bab71508e96f14f50b9e Mon Sep 17 00:00:00 2001 From: Eberhard Wolff Date: Fri, 27 Mar 2020 08:16:48 +0100 Subject: [PATCH 322/668] Fixed typo --- pkg/minikube/machine/fix.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index c86c89b306..7323b33089 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -138,7 +138,7 @@ func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node } if !recreated { - out.T(out.Restarting, `Retarting existing {{.driver_name}} {{.machine_type}} for "{{.cluster}}" ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) + out.T(out.Restarting, `Restarting existing {{.driver_name}} {{.machine_type}} for "{{.cluster}}" ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) } if err := h.Driver.Start(); err != nil { return h, errors.Wrap(err, "driver start") From d2f3e18d3ac842483a251bd0a03b2400021a6828 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20F=20Bj=C3=B6rklund?= Date: Fri, 27 Mar 2020 09:33:21 +0100 Subject: [PATCH 323/668] Fix misspelling of existence in integration test --- test/integration/guest_env_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff 
--git a/test/integration/guest_env_test.go b/test/integration/guest_env_test.go index 201d188ec2..0b4958ad4c 100644 --- a/test/integration/guest_env_test.go +++ b/test/integration/guest_env_test.go @@ -49,7 +49,7 @@ func TestGuestEnvironment(t *testing.T) { t.Parallel() rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("which %s", pkg))) if err != nil { - t.Errorf("failed to verify existance of %q binary : args %q: %v", pkg, rr.Command(), err) + t.Errorf("failed to verify existence of %q binary : args %q: %v", pkg, rr.Command(), err) } }) } @@ -70,7 +70,7 @@ func TestGuestEnvironment(t *testing.T) { t.Parallel() rr, err := Run(t, exec.CommandContext(ctx, Targt(), "-p", profile, "ssh", fmt.Sprintf("df -t ext4 %s | grep %s", mount, mount))) if err != nil { - t.Errorf("failed to verify existance of %q mount. args %q: %v", mount, rr.Command(), err) + t.Errorf("failed to verify existence of %q mount. args %q: %v", mount, rr.Command(), err) } }) } From 7c9068c991fcb5b80fb22a59984c219817f9a56d Mon Sep 17 00:00:00 2001 From: Ruben Baez Date: Fri, 27 Mar 2020 08:53:49 -0500 Subject: [PATCH 324/668] change driver --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index cd3d6844fc..168b01261e 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -241,7 +241,7 @@ jobs: mkdir -p report mkdir -p testhome START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome minikube_binaries/e2e-windows-amd64.exe -minikube-start-args=--vm-driver=hyperv -binary=minikube_binaries/minikube-windows-amd64.exe -test.v -test.timeout=65m 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome minikube_binaries/e2e-windows-amd64.exe -minikube-start-args=--vm-driver=docker -binary=minikube_binaries/minikube-windows-amd64.exe -test.v -test.timeout=65m 2>&1 | tee 
./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) From 47858bf92b6227a6003647a6412b99aa0d0fe3ef Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 27 Mar 2020 10:05:58 -0700 Subject: [PATCH 325/668] More translation documentation updates --- .../en/docs/Contributing/translations.md | 28 ++++++++++--------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/site/content/en/docs/Contributing/translations.md b/site/content/en/docs/Contributing/translations.md index aa1f8b8c8d..15aafb2748 100644 --- a/site/content/en/docs/Contributing/translations.md +++ b/site/content/en/docs/Contributing/translations.md @@ -12,7 +12,7 @@ All translations are stored in the top-level `translations` directory. * Add a new json file in the translations directory with the locale code of the language you want to add translations for, e.g. fr for French. ``` - ~/minikube$ touch translations/ar.json + ~/minikube$ touch translations/fr.json ~/minikube$ ls translations/ de.json es.json fr.json ja.json ko.json pl.json zh-CN.json ``` @@ -31,8 +31,7 @@ All translations are stored in the top-level `translations` directory. Writing to zh-CN.json Done! ``` -* Add translated strings as the value of the map where the English phrase is the key. - * The file will be json file with all of the English phrases as the keys of a map +* Add translated strings to the json file as the value of the map where the English phrase is the key. ``` ~/minikube$ head translations/fr.json { @@ -47,21 +46,24 @@ All translations are stored in the top-level `translations` directory. ``` * Add the translations as the values of the map, keeping in mind that anything in double braces `{{}}` are variable names describing what gets injected and should not be translated. 
``` - ~/minikube$ vi translations/ar.json + ~/minikube$ vi translations/fr.json { - "\"The '{{.minikube_addon}}' addon is disabled": "", - "\"{{.machineName}}\" does not exist, nothing to stop": "\""{{.machineName}} n'exist pas, rien a arrêter.", - "\"{{.name}}\" profile does not exist, trying anyways.": "", - "'none' driver does not support 'minikube docker-env' command": "", - "'none' driver does not support 'minikube mount' command": "", - "'none' driver does not support 'minikube podman-env' command": "", - "'none' driver does not support 'minikube ssh' command": "", - "'{{.driver}}' driver reported an issue: {{.error}}": "", + \[...\] + "Amount of time to wait for a service in seconds": "", + "Amount of time to wait for service in seconds": "", + "Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "", + "Automatically selected the {{.driver}} driver": "Choix automatique du driver {{.driver}}", + "Automatically selected the {{.driver}} driver. Other choices: {{.alternates}}": "Choix automatique du driver {{.driver}}. Autres choix: {{.alternatives}}", + "Available Commands": "", + "Basic Commands:": "", + "Because you are using docker driver on Mac, the terminal needs to be open to run it.": "", + \[...\] + } ``` ### Adding Translations To an Existing Language * Run `make extract` to make sure all strings are up to date -* Edit the appropriate json file in the 'translations' directory, in the same way described above. +* Edit the appropriate json file in the 'translations' directory, in the same way as described above. 
### Testing translations * Once you have all the translations you want, save the file and rebuild the minikube from scratch to pick up your new translations: From 969c90f5fb6835dfd8bbe83feead75cd78975d3d Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 27 Mar 2020 10:14:27 -0700 Subject: [PATCH 326/668] spacing --- site/content/en/docs/Contributing/translations.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/site/content/en/docs/Contributing/translations.md b/site/content/en/docs/Contributing/translations.md index 15aafb2748..244cbc8ac0 100644 --- a/site/content/en/docs/Contributing/translations.md +++ b/site/content/en/docs/Contributing/translations.md @@ -48,8 +48,8 @@ All translations are stored in the top-level `translations` directory. ``` ~/minikube$ vi translations/fr.json { - \[...\] - "Amount of time to wait for a service in seconds": "", + [...] + "Amount of time to wait for a service in seconds": "", "Amount of time to wait for service in seconds": "", "Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "", "Automatically selected the {{.driver}} driver": "Choix automatique du driver {{.driver}}", @@ -57,7 +57,7 @@ All translations are stored in the top-level `translations` directory. "Available Commands": "", "Basic Commands:": "", "Because you are using docker driver on Mac, the terminal needs to be open to run it.": "", - \[...\] + [...] 
} ``` From 339960589be38413e8caa849256cc4a734ffea8c Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 27 Mar 2020 10:28:01 -0700 Subject: [PATCH 327/668] spacing 2 electric boogaloo --- site/content/en/docs/Contributing/translations.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/site/content/en/docs/Contributing/translations.md b/site/content/en/docs/Contributing/translations.md index 244cbc8ac0..0fb40d0b10 100644 --- a/site/content/en/docs/Contributing/translations.md +++ b/site/content/en/docs/Contributing/translations.md @@ -48,7 +48,7 @@ All translations are stored in the top-level `translations` directory. ``` ~/minikube$ vi translations/fr.json { - [...] + [...] "Amount of time to wait for a service in seconds": "", "Amount of time to wait for service in seconds": "", "Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "", From 2420bb1f3a3e5327a13ac017a3d355425e21850a Mon Sep 17 00:00:00 2001 From: Vincent Link Date: Fri, 27 Mar 2020 21:06:22 +0100 Subject: [PATCH 328/668] Show all global flags in cli options help Before this fix the flags shown in `minikube options` were taken from a list of options (`viperWhiteList`) which seems arbitrary. I still don't know for sure why this is done but taking all PersistentFlags (persistent=apply to all sub commands) from the root command seems like the correct way. 
--- cmd/minikube/cmd/options.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/cmd/minikube/cmd/options.go b/cmd/minikube/cmd/options.go index cc07420d6c..80f97606d1 100644 --- a/cmd/minikube/cmd/options.go +++ b/cmd/minikube/cmd/options.go @@ -37,10 +37,9 @@ var optionsCmd = &cobra.Command{ // runOptions handles the executes the flow of "minikube options" func runOptions(cmd *cobra.Command, args []string) { out.String("The following options can be passed to any command:\n\n") - for _, flagName := range viperWhiteList { - f := pflag.Lookup(flagName) - out.String(flagUsage(f)) - } + cmd.Root().PersistentFlags().VisitAll(func(flag *pflag.Flag) { + out.String(flagUsage(flag)) + }) } func flagUsage(flag *pflag.Flag) string { From 31c6c5f36eb4390d671792cb90ccde015d0179a8 Mon Sep 17 00:00:00 2001 From: Marcin Niemira Date: Sat, 28 Mar 2020 12:11:41 +1100 Subject: [PATCH 329/668] use table testing for property testing --- pkg/minikube/localpath/localpath_test.go | 50 ++++++++++++++++++++---- 1 file changed, 43 insertions(+), 7 deletions(-) diff --git a/pkg/minikube/localpath/localpath_test.go b/pkg/minikube/localpath/localpath_test.go index 81b40722dc..bd78eb2ef7 100644 --- a/pkg/minikube/localpath/localpath_test.go +++ b/pkg/minikube/localpath/localpath_test.go @@ -67,13 +67,6 @@ func TestHasWindowsDriveLetter(t *testing.T) { } } -func TestConfigFile(t *testing.T) { - configFile := ConfigFile() - if !strings.Contains(configFile, "config.json") { - t.Errorf("ConfigFile returned path without 'config.json': %s", configFile) - } -} - func TestMiniPath(t *testing.T) { var testCases = []struct { env, basePath string @@ -100,3 +93,46 @@ func TestMiniPath(t *testing.T) { }) } } + +type propertyFnWithArg func(string) string +type propertyFnWithoutArg func() string + +func TestPropertyWithNameArg(t *testing.T) { + var testCases = []struct { + propertyFunc propertyFnWithArg + }{ + {Profile}, + {ClientCert}, + {ClientKey}, + } + miniPath := MiniPath() + 
mockedName := "foo" + for _, tc := range testCases { + t.Run(fmt.Sprintf("%v", tc.propertyFunc), func(t *testing.T) { + if !strings.Contains(tc.propertyFunc(mockedName), MiniPath()) { + t.Errorf("Propert %v doesn't contain miniPat %v", tc.propertyFunc, miniPath) + } + if !strings.Contains(tc.propertyFunc(mockedName), mockedName) { + t.Errorf("Propert %v doesn't contain passed name inpath %v", tc.propertyFunc, mockedName) + } + }) + + } +} + +func TestPropertyWithoutNameArg(t *testing.T) { + var testCases = []struct { + propertyFunc propertyFnWithoutArg + }{ + {ConfigFile}, + {CACert}, + } + miniPath := MiniPath() + for _, tc := range testCases { + t.Run(fmt.Sprintf("%v", tc.propertyFunc), func(t *testing.T) { + if !strings.Contains(tc.propertyFunc(), MiniPath()) { + t.Errorf("Propert %v doesn't contain miniPat %v", tc.propertyFunc, miniPath) + } + }) + } +} From c985a7287a08b5c35d32613eb15f87019820ac7f Mon Sep 17 00:00:00 2001 From: Marcin Niemira Date: Sat, 28 Mar 2020 13:46:16 +1100 Subject: [PATCH 330/668] test properties --- pkg/minikube/localpath/localpath_test.go | 46 +++++++++++++++++------- 1 file changed, 34 insertions(+), 12 deletions(-) diff --git a/pkg/minikube/localpath/localpath_test.go b/pkg/minikube/localpath/localpath_test.go index bd78eb2ef7..7ce6d68a3e 100644 --- a/pkg/minikube/localpath/localpath_test.go +++ b/pkg/minikube/localpath/localpath_test.go @@ -83,7 +83,7 @@ func TestMiniPath(t *testing.T) { t.Fatalf("Error reverting env %s to its original value (%s) var after test ", MinikubeHome, originalEnv) } }() - t.Run(fmt.Sprintf("%s", tc.env), func(t *testing.T) { + t.Run(tc.env, func(t *testing.T) { expectedPath := filepath.Join(tc.basePath, ".minikube") os.Setenv(MinikubeHome, tc.env) path := MiniPath() @@ -94,44 +94,66 @@ func TestMiniPath(t *testing.T) { } } +func TestMachinePath(t *testing.T) { + var testCases = []struct { + miniHome []string + contains string + }{ + {[]string{"tmp", "foo", "bar", "baz"}, "tmp"}, + {[]string{"tmp"}, 
"tmp"}, + {[]string{}, MiniPath()}, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s", tc.miniHome), func(t *testing.T) { + machinePath := MachinePath("foo", tc.miniHome...) + if !strings.Contains(machinePath, tc.contains) { + t.Errorf("Function MachinePath returned (%v) which doesn't contain expected (%v)", machinePath, tc.contains) + } + }) + } +} + type propertyFnWithArg func(string) string -type propertyFnWithoutArg func() string func TestPropertyWithNameArg(t *testing.T) { var testCases = []struct { propertyFunc propertyFnWithArg + name string }{ - {Profile}, - {ClientCert}, - {ClientKey}, + {Profile, "Profile"}, + {ClientCert, "ClientCert"}, + {ClientKey, "ClientKey"}, } miniPath := MiniPath() mockedName := "foo" for _, tc := range testCases { - t.Run(fmt.Sprintf("%v", tc.propertyFunc), func(t *testing.T) { + t.Run(tc.name, func(t *testing.T) { if !strings.Contains(tc.propertyFunc(mockedName), MiniPath()) { - t.Errorf("Propert %v doesn't contain miniPat %v", tc.propertyFunc, miniPath) + t.Errorf("Propert %s(%v) doesn't contain miniPat %v", tc.name, tc.propertyFunc, miniPath) } if !strings.Contains(tc.propertyFunc(mockedName), mockedName) { - t.Errorf("Propert %v doesn't contain passed name inpath %v", tc.propertyFunc, mockedName) + t.Errorf("Propert %s(%v) doesn't contain passed name inpath %v", tc.name, tc.propertyFunc, mockedName) } }) } } +type propertyFnWithoutArg func() string + func TestPropertyWithoutNameArg(t *testing.T) { var testCases = []struct { propertyFunc propertyFnWithoutArg + name string }{ - {ConfigFile}, - {CACert}, + {ConfigFile, "ConfigFile"}, + {CACert, "CACert"}, } miniPath := MiniPath() for _, tc := range testCases { - t.Run(fmt.Sprintf("%v", tc.propertyFunc), func(t *testing.T) { + t.Run(tc.name, func(t *testing.T) { if !strings.Contains(tc.propertyFunc(), MiniPath()) { - t.Errorf("Propert %v doesn't contain miniPat %v", tc.propertyFunc, miniPath) + t.Errorf("Propert %s(%v) doesn't contain miniPat %v", tc.name, 
tc.propertyFunc, miniPath) } }) } From 93d6829905446838def9f3e9ee9e24361663937b Mon Sep 17 00:00:00 2001 From: Marcin Niemira Date: Sat, 28 Mar 2020 13:49:39 +1100 Subject: [PATCH 331/668] typofix --- pkg/minikube/localpath/localpath_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/minikube/localpath/localpath_test.go b/pkg/minikube/localpath/localpath_test.go index 7ce6d68a3e..6a4ee4e51b 100644 --- a/pkg/minikube/localpath/localpath_test.go +++ b/pkg/minikube/localpath/localpath_test.go @@ -129,10 +129,10 @@ func TestPropertyWithNameArg(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { if !strings.Contains(tc.propertyFunc(mockedName), MiniPath()) { - t.Errorf("Propert %s(%v) doesn't contain miniPat %v", tc.name, tc.propertyFunc, miniPath) + t.Errorf("Property %s(%v) doesn't contain miniPath %v", tc.name, tc.propertyFunc, miniPath) } if !strings.Contains(tc.propertyFunc(mockedName), mockedName) { - t.Errorf("Propert %s(%v) doesn't contain passed name inpath %v", tc.name, tc.propertyFunc, mockedName) + t.Errorf("Property %s(%v) doesn't contain passed name %v", tc.name, tc.propertyFunc, mockedName) } }) @@ -153,7 +153,7 @@ func TestPropertyWithoutNameArg(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { if !strings.Contains(tc.propertyFunc(), MiniPath()) { - t.Errorf("Propert %s(%v) doesn't contain miniPat %v", tc.name, tc.propertyFunc, miniPath) + t.Errorf("Property %s(%v) doesn't contain expected miniPath %v", tc.name, tc.propertyFunc, miniPath) } }) } From 546b26e695a8c9af8530959ffe7d52443382fe93 Mon Sep 17 00:00:00 2001 From: Marcin Niemira Date: Sat, 28 Mar 2020 20:16:48 +1100 Subject: [PATCH 332/668] implement CR suggestions --- pkg/minikube/localpath/localpath_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/localpath/localpath_test.go b/pkg/minikube/localpath/localpath_test.go index 6a4ee4e51b..4753752ee2 100644 --- 
a/pkg/minikube/localpath/localpath_test.go +++ b/pkg/minikube/localpath/localpath_test.go @@ -75,8 +75,8 @@ func TestMiniPath(t *testing.T) { {"/tmp/", "/tmp"}, {"", homedir.HomeDir()}, } + originalEnv := os.Getenv(MinikubeHome) for _, tc := range testCases { - originalEnv := os.Getenv(MinikubeHome) defer func() { // revert to pre-test env var err := os.Setenv(MinikubeHome, originalEnv) if err != nil { From 73dd9914f61ad8a68fcbe83f71f37adcf0e5a7ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20F=20Bj=C3=B6rklund?= Date: Sat, 28 Mar 2020 12:25:32 +0100 Subject: [PATCH 333/668] Fix gofmt and golint on master, after merges --- pkg/minikube/bootstrapper/bsutil/kverify/kverify.go | 1 + pkg/minikube/bootstrapper/certs.go | 1 + pkg/minikube/mustload/mustload.go | 3 ++- pkg/minikube/tunnel/route_freebsd.go | 2 -- 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go index 6f47fc3204..b80f0689e4 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go @@ -231,6 +231,7 @@ func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, c return nil } +// APIServerVersionMatch checks if the server version matches the expected func APIServerVersionMatch(client *kubernetes.Clientset, expected string) error { vi, err := client.ServerVersion() if err != nil { diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index cbd9d27b86..28ea6e5295 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -129,6 +129,7 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) return copyableFiles, nil } +// CACerts has cert and key for CA (and Proxy) type CACerts struct { caCert string caKey string diff --git a/pkg/minikube/mustload/mustload.go b/pkg/minikube/mustload/mustload.go index 
e9703c1746..9897102638 100644 --- a/pkg/minikube/mustload/mustload.go +++ b/pkg/minikube/mustload/mustload.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// mustload loads minikube clusters, exiting with user-friendly messages +// Package mustload loads minikube clusters, exiting with user-friendly messages package mustload import ( @@ -37,6 +37,7 @@ import ( "k8s.io/minikube/pkg/minikube/out" ) +// ClusterController holds all the needed information for a minikube cluster type ClusterController struct { Config *config.ClusterConfig API libmachine.API diff --git a/pkg/minikube/tunnel/route_freebsd.go b/pkg/minikube/tunnel/route_freebsd.go index d88d5301ca..19cd4e4ad3 100644 --- a/pkg/minikube/tunnel/route_freebsd.go +++ b/pkg/minikube/tunnel/route_freebsd.go @@ -164,5 +164,3 @@ func (router *osRouter) Cleanup(route *Route) error { } return nil } - - From 1f31d25c16dfef600d06e425d81863f25d72a12e Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Sat, 28 Mar 2020 13:19:48 -0700 Subject: [PATCH 334/668] Use LookPath to verify conntrack install --- cmd/minikube/cmd/start.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index cb97d8ca43..498849f437 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -836,10 +836,8 @@ func validateFlags(cmd *cobra.Command, drvName string) { // conntrack is required starting with kubernetes 1.18, include the release candidates for completion version, _ := util.ParseKubernetesVersion(getKubernetesVersion(nil)) if version.GTE(semver.MustParse("1.18.0-beta.1")) { - err := exec.Command("conntrack").Run() - if err != nil { - exit.WithCodeT(exit.Config, "The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}", out.V{"k8sVersion": version.String()}) - + if _, err := exec.LookPath("conntrack"); err != nil { + 
exit.WithCodeT(exit.Config, "Sorry, Kubernetes v{{.k8sVersion}} requires conntrack to be installed in root's path", out.V{"k8sVersion": version.String()}) } } } From 27882f122d978bb85b09e50ddb9ca4a62ac59d68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Str=C3=B6mberg?= Date: Sat, 28 Mar 2020 13:44:43 -0700 Subject: [PATCH 335/668] Issue template: attempt to get better reproduction cases I'm curious if this will yield better issue reports. --- .github/ISSUE_TEMPLATE/__en-US.md | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/__en-US.md b/.github/ISSUE_TEMPLATE/__en-US.md index 261f435c2d..8b72b70175 100644 --- a/.github/ISSUE_TEMPLATE/__en-US.md +++ b/.github/ISSUE_TEMPLATE/__en-US.md @@ -2,21 +2,16 @@ name: English about: Report an issue --- - + +**Commands required to reproduce the issue:** -**The exact command to reproduce the issue**: +1. +2. +3. + +**The full output of the command that failed**: -**The full output of the command that failed**:
+**Optional: The full output of the `minikube start` command you used, if not already included**: - - -
- -**The output of the `minikube logs` command**:
- - - -
- -**The operating system version**: +**Optional: The full output of `minikube logs`**: From 8a4289c3c4a2ab6ea00e094d97f72811be785015 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Str=C3=B6mberg?= Date: Sat, 28 Mar 2020 21:02:09 -0700 Subject: [PATCH 336/668] Minor tweaks --- .github/ISSUE_TEMPLATE/__en-US.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/__en-US.md b/.github/ISSUE_TEMPLATE/__en-US.md index 8b72b70175..0d283dc54b 100644 --- a/.github/ISSUE_TEMPLATE/__en-US.md +++ b/.github/ISSUE_TEMPLATE/__en-US.md @@ -2,16 +2,19 @@ name: English about: Report an issue --- - -**Commands required to reproduce the issue:** + +**Commands required to reproduce the issue:** 1. 2. 3. - -**The full output of the command that failed**: +**The full output of the command that failed**: + + **Optional: The full output of the `minikube start` command you used, if not already included**: + + **Optional: The full output of `minikube logs`**: From d05a13aa4e30378832d0d432077a9ecba3848d28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Str=C3=B6mberg?= Date: Sat, 28 Mar 2020 21:04:39 -0700 Subject: [PATCH 337/668] Simplify template --- .github/ISSUE_TEMPLATE/__en-US.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/__en-US.md b/.github/ISSUE_TEMPLATE/__en-US.md index 0d283dc54b..be92553c31 100644 --- a/.github/ISSUE_TEMPLATE/__en-US.md +++ b/.github/ISSUE_TEMPLATE/__en-US.md @@ -3,18 +3,17 @@ name: English about: Report an issue --- -**Commands required to reproduce the issue:** +**Steps to reproduce the issue:** 1. 2. 3. 
-**The full output of the command that failed**: +**The full output of the command that failed:** - -**Optional: The full output of the `minikube start` command you used, if not already included**: +**Optional: The full output of the `minikube start` command used, if not already included:** +**Optional: The full output of the `minikube logs` command:** -**Optional: The full output of `minikube logs`**: From 736b60de89c338451110f8e2fed7822456fe592b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Str=C3=B6mberg?= Date: Sat, 28 Mar 2020 21:05:55 -0700 Subject: [PATCH 338/668] Update __en-US.md --- .github/ISSUE_TEMPLATE/__en-US.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/__en-US.md b/.github/ISSUE_TEMPLATE/__en-US.md index be92553c31..1f056087fb 100644 --- a/.github/ISSUE_TEMPLATE/__en-US.md +++ b/.github/ISSUE_TEMPLATE/__en-US.md @@ -2,7 +2,6 @@ name: English about: Report an issue --- - **Steps to reproduce the issue:** 1. @@ -12,8 +11,13 @@ about: Report an issue **The full output of the command that failed:** + **Optional: The full output of the `minikube start` command used, if not already included:** -**Optional: The full output of the `minikube logs` command:** +**Optional: The full output of the `minikube logs` command:** +
+ + +
From da7227eb7249cc46917f794767a3bc9a452542b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Str=C3=B6mberg?= Date: Sat, 28 Mar 2020 21:08:17 -0700 Subject: [PATCH 339/668] Less words --- .github/ISSUE_TEMPLATE/__en-US.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/__en-US.md b/.github/ISSUE_TEMPLATE/__en-US.md index 1f056087fb..f917021a78 100644 --- a/.github/ISSUE_TEMPLATE/__en-US.md +++ b/.github/ISSUE_TEMPLATE/__en-US.md @@ -8,15 +8,15 @@ about: Report an issue 2. 3. -**The full output of the command that failed:** +**Full output of failed command:** -**Optional: The full output of the `minikube start` command used, if not already included:** +**Full output of `minikube start` command used, if not already included:** -**Optional: The full output of the `minikube logs` command:** +**Optional: Full output of `minikube logs` command:**
From 417081aeb672e242ed31955d63a1f4b64b8f131b Mon Sep 17 00:00:00 2001 From: Marcin Niemira Date: Sun, 29 Mar 2020 15:08:30 +1100 Subject: [PATCH 340/668] move defer outside of for loop --- pkg/minikube/localpath/localpath_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/minikube/localpath/localpath_test.go b/pkg/minikube/localpath/localpath_test.go index 4753752ee2..d8e6915a43 100644 --- a/pkg/minikube/localpath/localpath_test.go +++ b/pkg/minikube/localpath/localpath_test.go @@ -76,13 +76,13 @@ func TestMiniPath(t *testing.T) { {"", homedir.HomeDir()}, } originalEnv := os.Getenv(MinikubeHome) + defer func() { // revert to pre-test env var + err := os.Setenv(MinikubeHome, originalEnv) + if err != nil { + t.Fatalf("Error reverting env %s to its original value (%s) var after test ", MinikubeHome, originalEnv) + } + }() for _, tc := range testCases { - defer func() { // revert to pre-test env var - err := os.Setenv(MinikubeHome, originalEnv) - if err != nil { - t.Fatalf("Error reverting env %s to its original value (%s) var after test ", MinikubeHome, originalEnv) - } - }() t.Run(tc.env, func(t *testing.T) { expectedPath := filepath.Join(tc.basePath, ".minikube") os.Setenv(MinikubeHome, tc.env) From 5bb0c580a0750427a7453259f47da81fc8ad120b Mon Sep 17 00:00:00 2001 From: Vincent Link Date: Sun, 29 Mar 2020 15:12:37 +0200 Subject: [PATCH 341/668] Add simple embed-certs integration test --- test/integration/start_stop_delete_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/integration/start_stop_delete_test.go b/test/integration/start_stop_delete_test.go index dd106484c9..c484a2491b 100644 --- a/test/integration/start_stop_delete_test.go +++ b/test/integration/start_stop_delete_test.go @@ -71,6 +71,9 @@ func TestStartStop(t *testing.T) { "--disable-driver-mounts", "--extra-config=kubeadm.ignore-preflight-errors=SystemVerification", }}, + {"embed-certs", constants.DefaultKubernetesVersion, []string{ + 
"--embed-certs", + }}, } for _, tc := range tests { From 7debdacf5bade075ba1d3cde5999e247905da772 Mon Sep 17 00:00:00 2001 From: Vincent Link Date: Sun, 29 Mar 2020 15:13:59 +0200 Subject: [PATCH 342/668] Write the kubeconfig after generating certs The content of the kubeconfig is defined before certs are generated by the bootstrapper. When certs are embedded via --embed-certs writing the kubeconfig fails if the certificates are not generated so it must run after the bootstrap process which generates them. --- pkg/minikube/node/start.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 44ff9bb1cc..c72df40965 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -101,10 +101,10 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo showVersionInfo(n.KubernetesVersion, cr) var bs bootstrapper.Bootstrapper - var kubeconfig *kubeconfig.Settings + var kcs *kubeconfig.Settings if apiServer { // Must be written before bootstrap, otherwise health checks may flake due to stale IP - kubeconfig, err = setupKubeconfig(host, &cc, &n, cc.Name) + kcs = setupKubeconfig(host, &cc, &n, cc.Name) if err != nil { exit.WithError("Failed to setup kubeconfig", err) } @@ -115,6 +115,11 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo if err != nil { exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, cc, mRunner)) } + + // write the kubeconfig to the file system after everything required (like certs) are created by the bootstrapper + if err := kubeconfig.Update(kcs); err != nil { + exit.WithError("Failed to update kubeconfig file.", err) + } } else { bs, err = cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, n) if err != nil { @@ -124,7 +129,6 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo if err = 
bs.SetupCerts(cc.KubernetesConfig, n); err != nil { exit.WithError("setting up certs", err) } - } configureMounts() @@ -175,8 +179,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo } } - return kubeconfig - + return kcs } // ConfigureRuntimes does what needs to happen to get a runtime going. @@ -239,7 +242,7 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) return bs } -func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { +func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clusterName string) *kubeconfig.Settings { addr, err := apiServerURL(*h, *cc, *n) if err != nil { exit.WithError("Failed to get API Server URL", err) @@ -259,10 +262,7 @@ func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clu } kcs.SetPath(kubeconfig.PathFromEnv()) - if err := kubeconfig.Update(kcs); err != nil { - return kcs, err - } - return kcs, nil + return kcs } func apiServerURL(h host.Host, cc config.ClusterConfig, n config.Node) (string, error) { From 063077c712c4d8b88aabdea861f18963755326f8 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Sun, 29 Mar 2020 11:34:21 -0700 Subject: [PATCH 343/668] mustload: Check apiserver status using forwarded IP/port --- cmd/minikube/cmd/docker-env.go | 4 +-- cmd/minikube/cmd/logs.go | 10 +++---- cmd/minikube/cmd/mount.go | 10 +++---- cmd/minikube/cmd/podman-env.go | 6 ++-- cmd/minikube/cmd/ssh.go | 4 +-- pkg/minikube/mustload/mustload.go | 48 ++++++++++++++++++++----------- 6 files changed, 49 insertions(+), 33 deletions(-) diff --git a/cmd/minikube/cmd/docker-env.go b/cmd/minikube/cmd/docker-env.go index 4b61a435d2..901e57fc7b 100644 --- a/cmd/minikube/cmd/docker-env.go +++ b/cmd/minikube/cmd/docker-env.go @@ -129,7 +129,7 @@ var dockerEnvCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { cname := ClusterFlagValue() co := 
mustload.Running(cname) - driverName := co.CPHost.DriverName + driverName := co.CP.Host.DriverName if driverName == driver.None { exit.UsageT(`'none' driver does not support 'minikube docker-env' command`) @@ -140,7 +140,7 @@ var dockerEnvCmd = &cobra.Command{ out.V{"runtime": co.Config.KubernetesConfig.ContainerRuntime}) } - if ok := isDockerActive(co.CPRunner); !ok { + if ok := isDockerActive(co.CP.Runner); !ok { exit.WithCodeT(exit.Unavailable, `The docker service within '{{.name}}' is not active`, out.V{"name": cname}) } diff --git a/cmd/minikube/cmd/logs.go b/cmd/minikube/cmd/logs.go index f2a1fba453..0c3854ea14 100644 --- a/cmd/minikube/cmd/logs.go +++ b/cmd/minikube/cmd/logs.go @@ -53,28 +53,28 @@ var logsCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { co := mustload.Running(ClusterFlagValue()) - bs, err := cluster.Bootstrapper(co.API, viper.GetString(cmdcfg.Bootstrapper), *co.Config, *co.CPNode) + bs, err := cluster.Bootstrapper(co.API, viper.GetString(cmdcfg.Bootstrapper), *co.Config, *co.CP.Node) if err != nil { exit.WithError("Error getting cluster bootstrapper", err) } - cr, err := cruntime.New(cruntime.Config{Type: co.Config.KubernetesConfig.ContainerRuntime, Runner: co.CPRunner}) + cr, err := cruntime.New(cruntime.Config{Type: co.Config.KubernetesConfig.ContainerRuntime, Runner: co.CP.Runner}) if err != nil { exit.WithError("Unable to get runtime", err) } if followLogs { - err := logs.Follow(cr, bs, *co.Config, co.CPRunner) + err := logs.Follow(cr, bs, *co.Config, co.CP.Runner) if err != nil { exit.WithError("Follow", err) } return } if showProblems { - problems := logs.FindProblems(cr, bs, *co.Config, co.CPRunner) + problems := logs.FindProblems(cr, bs, *co.Config, co.CP.Runner) logs.OutputProblems(problems, numberOfProblems) return } - err = logs.Output(cr, bs, *co.Config, co.CPRunner, numberOfLines) + err = logs.Output(cr, bs, *co.Config, co.CP.Runner, numberOfLines) if err != nil { out.Ln("") // Avoid exit.WithError, since it 
outputs the issue URL diff --git a/cmd/minikube/cmd/mount.go b/cmd/minikube/cmd/mount.go index 570a76447e..4e97c7b463 100644 --- a/cmd/minikube/cmd/mount.go +++ b/cmd/minikube/cmd/mount.go @@ -99,14 +99,14 @@ var mountCmd = &cobra.Command{ } co := mustload.Running(ClusterFlagValue()) - if co.CPHost.Driver.DriverName() == driver.None { + if co.CP.Host.Driver.DriverName() == driver.None { exit.UsageT(`'none' driver does not support 'minikube mount' command`) } var ip net.IP var err error if mountIP == "" { - ip, err = cluster.GetVMHostIP(co.CPHost) + ip, err = cluster.GetVMHostIP(co.CP.Host) if err != nil { exit.WithError("Error getting the host IP address to use from within the VM", err) } @@ -147,7 +147,7 @@ var mountCmd = &cobra.Command{ } bindIP := ip.String() // the ip to listen on the user's host machine - if driver.IsKIC(co.CPHost.Driver.DriverName()) && runtime.GOOS != "linux" { + if driver.IsKIC(co.CP.Host.Driver.DriverName()) && runtime.GOOS != "linux" { bindIP = "127.0.0.1" } out.T(out.Mounting, "Mounting host path {{.sourcePath}} into VM as {{.destinationPath}} ...", out.V{"sourcePath": hostPath, "destinationPath": vmPath}) @@ -177,7 +177,7 @@ var mountCmd = &cobra.Command{ go func() { for sig := range c { out.T(out.Unmount, "Unmounting {{.path}} ...", out.V{"path": vmPath}) - err := cluster.Unmount(co.CPRunner, vmPath) + err := cluster.Unmount(co.CP.Runner, vmPath) if err != nil { out.ErrT(out.FailureType, "Failed unmount: {{.error}}", out.V{"error": err}) } @@ -185,7 +185,7 @@ var mountCmd = &cobra.Command{ } }() - err = cluster.Mount(co.CPRunner, ip.String(), vmPath, cfg) + err = cluster.Mount(co.CP.Runner, ip.String(), vmPath, cfg) if err != nil { exit.WithError("mount failed", err) } diff --git a/cmd/minikube/cmd/podman-env.go b/cmd/minikube/cmd/podman-env.go index c55ba5f5f0..124a721de8 100644 --- a/cmd/minikube/cmd/podman-env.go +++ b/cmd/minikube/cmd/podman-env.go @@ -108,17 +108,17 @@ var podmanEnvCmd = &cobra.Command{ Run: func(cmd 
*cobra.Command, args []string) { cname := ClusterFlagValue() co := mustload.Running(cname) - driverName := co.CPHost.DriverName + driverName := co.CP.Host.DriverName if driverName == driver.None { exit.UsageT(`'none' driver does not support 'minikube podman-env' command`) } - if ok := isPodmanAvailable(co.CPRunner); !ok { + if ok := isPodmanAvailable(co.CP.Runner); !ok { exit.WithCodeT(exit.Unavailable, `The podman service within '{{.cluster}}' is not active`, out.V{"cluster": cname}) } - client, err := createExternalSSHClient(co.CPHost.Driver) + client, err := createExternalSSHClient(co.CP.Host.Driver) if err != nil { exit.WithError("Error getting ssh client", err) } diff --git a/cmd/minikube/cmd/ssh.go b/cmd/minikube/cmd/ssh.go index a18ae297de..bbd39af369 100644 --- a/cmd/minikube/cmd/ssh.go +++ b/cmd/minikube/cmd/ssh.go @@ -43,14 +43,14 @@ var sshCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { cname := ClusterFlagValue() co := mustload.Running(cname) - if co.CPHost.DriverName == driver.None { + if co.CP.Host.DriverName == driver.None { exit.UsageT("'none' driver does not support 'minikube ssh' command") } var err error var n *config.Node if nodeName == "" { - n = co.CPNode + n = co.CP.Node } else { n, _, err = node.Retrieve(co.Config, nodeName) if err != nil { diff --git a/pkg/minikube/mustload/mustload.go b/pkg/minikube/mustload/mustload.go index 9897102638..ac5a0fbca3 100644 --- a/pkg/minikube/mustload/mustload.go +++ b/pkg/minikube/mustload/mustload.go @@ -41,10 +41,16 @@ import ( type ClusterController struct { Config *config.ClusterConfig API libmachine.API - CPHost *host.Host - CPNode *config.Node - CPRunner command.Runner DriverIP net.IP + CP ControlPlane +} + +type ControlPlane struct { + Host *host.Host + Node *config.Node + Runner command.Runner + ForwardedIP net.IP + ForwardedPort int } // Partial is a cmd-friendly way to load a cluster which may or may not be running @@ -107,26 +113,36 @@ func Running(name string) 
ClusterController { exit.WithError("Unable to get command runner", err) } - ips, err := host.Driver.GetIP() + ipStr, err := host.Driver.GetIP() if err != nil { exit.WithError("Unable to get driver IP", err) } - if driver.IsKIC(host.DriverName) { - ips = oci.DefaultBindIPV4 + ip := net.ParseIP(ipStr) + if ip == nil { + exit.WithCodeT(exit.Software, fmt.Sprintf("Unable to parse driver IP: %q", ipStr)) } - ip := net.ParseIP(ips) - if ip == nil { - exit.WithCodeT(exit.Software, fmt.Sprintf("Unable to parse driver IP: %q", ips)) + cpIP := cp.IP + cpPort := cp.Port + if driver.IsKIC(host.DriverName) { + cpIP = oci.DefaultBindIPV4 + cpPort, err = oci.ForwardedPort(cc.Driver, cc.Name, cp.Port) + if err != nil { + exit.WithError("Unable to get forwarded port", err) + } } return ClusterController{ - API: api, - Config: cc, - CPRunner: cr, - CPHost: host, - CPNode: &cp, + API: api, + Config: cc, + CP: ControlPlane{ + Runner: cr, + Host: host, + Node: &cp, + ForwardedIP: net.ParseIP(cpIP), + ForwardedPort: cpPort, + }, DriverIP: ip, } } @@ -135,7 +151,7 @@ func Running(name string) ClusterController { func Healthy(name string) ClusterController { co := Running(name) - as, err := kverify.APIServerStatus(co.CPRunner, net.ParseIP(co.CPNode.IP), co.CPNode.Port) + as, err := kverify.APIServerStatus(co.CP.Runner, co.CP.ForwardedIP, co.CP.ForwardedPort) if err != nil { out.T(out.FailureType, `Unable to get control plane status: {{.error}}`, out.V{"error": err}) exitTip("delete", name, exit.Unavailable) @@ -165,6 +181,6 @@ func ExampleCmd(cname string, action string) string { // exitTip returns an action tip and exits func exitTip(action string, profile string, code int) { command := ExampleCmd(profile, action) - out.T(out.Workaround, "To fix this, run: {{.command}}", out.V{"command": command}) + out.T(out.Workaround, `To fix this, run: "{{.command}}"`, out.V{"command": command}) os.Exit(code) } From 7f5f12c5f4fe18c444b8f0211e10f4e765d62c6f Mon Sep 17 00:00:00 2001 From: Thomas 
Stromberg Date: Sun, 29 Mar 2020 11:44:25 -0700 Subject: [PATCH 344/668] Get rid of DriverIP attr to prevent future misuse --- cmd/minikube/cmd/docker-env.go | 2 +- cmd/minikube/cmd/ip.go | 2 +- cmd/minikube/cmd/update-context.go | 9 ++++++--- pkg/minikube/mustload/mustload.go | 8 +++----- 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/cmd/minikube/cmd/docker-env.go b/cmd/minikube/cmd/docker-env.go index 901e57fc7b..cf18ad0414 100644 --- a/cmd/minikube/cmd/docker-env.go +++ b/cmd/minikube/cmd/docker-env.go @@ -161,7 +161,7 @@ var dockerEnvCmd = &cobra.Command{ EnvConfig: sh, profile: cname, driver: driverName, - hostIP: co.DriverIP.String(), + hostIP: co.CP.ForwardedIP.String(), port: port, certsDir: localpath.MakeMiniPath("certs"), noProxy: noProxy, diff --git a/cmd/minikube/cmd/ip.go b/cmd/minikube/cmd/ip.go index 04dd705fb5..f91709f02f 100644 --- a/cmd/minikube/cmd/ip.go +++ b/cmd/minikube/cmd/ip.go @@ -29,6 +29,6 @@ var ipCmd = &cobra.Command{ Long: `Retrieves the IP address of the running cluster, and writes it to STDOUT.`, Run: func(cmd *cobra.Command, args []string) { co := mustload.Running(ClusterFlagValue()) - out.Ln(co.DriverIP.String()) + out.Ln(co.CP.ForwardedIP.String()) }, } diff --git a/cmd/minikube/cmd/update-context.go b/cmd/minikube/cmd/update-context.go index 39a76e5929..79e42fe96d 100644 --- a/cmd/minikube/cmd/update-context.go +++ b/cmd/minikube/cmd/update-context.go @@ -33,14 +33,17 @@ var updateContextCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { cname := ClusterFlagValue() co := mustload.Running(cname) - updated, err := kubeconfig.UpdateIP(co.DriverIP, cname, kubeconfig.PathFromEnv()) + ip := co.CP.ForwardedIP + + // ??? For KIC, should we also update the port ??? 
+ updated, err := kubeconfig.UpdateIP(ip, cname, kubeconfig.PathFromEnv()) if err != nil { exit.WithError("update config", err) } if updated { - out.T(out.Celebrate, "{{.cluster}} IP has been updated to point at {{.ip}}", out.V{"cluster": cname, "ip": co.DriverIP}) + out.T(out.Celebrate, "{{.cluster}} IP has been updated to point at {{.ip}}", out.V{"cluster": cname, "ip": ip}) } else { - out.T(out.Meh, "{{.cluster}} IP was already correctly configured for {{.ip}}", out.V{"cluster": cname, "ip": co.DriverIP}) + out.T(out.Meh, "{{.cluster}} IP was already correctly configured for {{.ip}}", out.V{"cluster": cname, "ip": ip}) } }, diff --git a/pkg/minikube/mustload/mustload.go b/pkg/minikube/mustload/mustload.go index ac5a0fbca3..b6e24bdfac 100644 --- a/pkg/minikube/mustload/mustload.go +++ b/pkg/minikube/mustload/mustload.go @@ -39,10 +39,9 @@ import ( // ClusterController holds all the needed information for a minikube cluster type ClusterController struct { - Config *config.ClusterConfig - API libmachine.API - DriverIP net.IP - CP ControlPlane + Config *config.ClusterConfig + API libmachine.API + CP ControlPlane } type ControlPlane struct { @@ -143,7 +142,6 @@ func Running(name string) ClusterController { ForwardedIP: net.ParseIP(cpIP), ForwardedPort: cpPort, }, - DriverIP: ip, } } From cc731bb0101f30c449e46c936c8feb8c5532eeef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Str=C3=B6mberg?= Date: Sun, 29 Mar 2020 14:16:01 -0700 Subject: [PATCH 345/668] Add badges for downloads & most recent version --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 75423b7474..c9f85e00a8 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,9 @@ [![BuildStatus Widget]][BuildStatus Result] [![GoReport Widget]][GoReport Status] +[![Github All Releases](https://img.shields.io/github/downloads/kubernetes/minikube/total.svg)](https://github.com/kubernetes/minikube/releases/latest) +[![Latest 
Release](https://img.shields.io/github/v/release/kubernetes/minikube?include_prereleases)](https://github.com/kubernetes/minikube/releases/latest) + [BuildStatus Result]: https://travis-ci.org/kubernetes/minikube [BuildStatus Widget]: https://travis-ci.org/kubernetes/minikube.svg?branch=master From 15ddaf723ebe445d964ec472ce49c095d9acd84e Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Mon, 30 Mar 2020 08:40:12 -0700 Subject: [PATCH 346/668] Release schedule proposal --- .../schedule-proposal.md | 56 +++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100644 enhancements/proposed/20200316-release-schedule/schedule-proposal.md diff --git a/enhancements/proposed/20200316-release-schedule/schedule-proposal.md b/enhancements/proposed/20200316-release-schedule/schedule-proposal.md new file mode 100644 index 0000000000..eca00232e6 --- /dev/null +++ b/enhancements/proposed/20200316-release-schedule/schedule-proposal.md @@ -0,0 +1,56 @@ +# Release Schedule + +* First proposed: 2020-03-30 +* Authors: Thomas Stromberg (@tstromberg) + +## Reviewer Priorities + +Please review this proposal with the following priorities: + +* Does this fit with minikube's [principles](https://minikube.sigs.k8s.io/docs/concepts/principles/)? +* Are there other approaches to consider? +* Could the implementation be made simpler? +* Are there usability, reliability, or technical debt concerns? + +Please leave the above text in your proposal as instructions to the reader. + +## Summary + +Adding structure to the release process to encourage predictable stress-free releases with fewer regressions. 
+ +## Goals + +* A decrease in release regressions +* Minimal disruption to development velocity +* Compatible with the upstream Kubernetes release schedule + +## Non-Goals + +* Maintaining release branches + +## Design Details + +minikube currently has 3 types of releases: + +* Feature release (v1.9.0) +* Bugfix release (v1.9.1) +* Beta releases + +This proposal maintains the pre-existing structure, but adds dates for when each step will occur: + +* Day 0: Create milestones for the next regression & feature release +* Day 7: Regression release (optional) +* Day 14: Early Beta release (optional) +* Day 21: Beta release +* Day 24: Feature freeze and optional final beta +* Day 28: Feature release + +To synchronize with Kubernetes release schedule (Tuesday afternoon PST), minikube releases should be Wednesday morning (PST). To select a final release date, consult [sig-release](https://github.com/kubernetes/sig-release/tree/master/releases) to see if there is an upcoming minor release of Kubernetes within the next 6 weeks. If so, schedule the minikube release to occur within 24 hours of it. + +Even with this schedule, it is assumed that release dates may slip. + +## Alternatives Considered + +### Release branches + +Rather than considering master to always be in a releasable state, we could maintain long-lived release branches. This adds a lot of overhead to the release manager, as they have to manage cherry-picks. 
From f1be2247215a0bea1d62f119e6e10854cf48f602 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Mon, 30 Mar 2020 08:49:21 -0700 Subject: [PATCH 347/668] Mention extension idea --- .../20200316-release-schedule/schedule-proposal.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/enhancements/proposed/20200316-release-schedule/schedule-proposal.md b/enhancements/proposed/20200316-release-schedule/schedule-proposal.md index eca00232e6..fdcb03ba08 100644 --- a/enhancements/proposed/20200316-release-schedule/schedule-proposal.md +++ b/enhancements/proposed/20200316-release-schedule/schedule-proposal.md @@ -54,3 +54,16 @@ Even with this schedule, it is assumed that release dates may slip. ### Release branches Rather than considering master to always be in a releasable state, we could maintain long-lived release branches. This adds a lot of overhead to the release manager, as they have to manage cherry-picks. + +### Extending cycle by a week + +As this process assumes a regression release at Day 7, it begs the question on whether or not a 5-week feature release cycle makes more sense: + +* Day 0: Create milestones for the next regression & feature release +* Day 7: Regression release (optional) +* Day 21: Beta release +* Day 28: Beta 2 release +* Day 31: Feature freeze and optional final beta +* Day 35: Feature release + +The downside is a slightly lower release velocity, the upside may be more a more stable final release. 
From 0696e90acd9d3f5e35c3bc55d4b9ae382c677d35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Str=C3=B6mberg?= Date: Mon, 30 Mar 2020 09:02:49 -0700 Subject: [PATCH 348/668] Fix HTML comments --- .github/ISSUE_TEMPLATE/__en-US.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/__en-US.md b/.github/ISSUE_TEMPLATE/__en-US.md index f917021a78..0e78dda079 100644 --- a/.github/ISSUE_TEMPLATE/__en-US.md +++ b/.github/ISSUE_TEMPLATE/__en-US.md @@ -2,13 +2,15 @@ name: English about: Report an issue --- -**Steps to reproduce the issue:** + +**Steps to reproduce the issue:** 1. 2. 3. -**Full output of failed command:** + +**Full output of failed command:** From 5f9515040d813061e6ea486a9c82acd7c29f4b38 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20F=20Bj=C3=B6rklund?= Date: Sat, 28 Mar 2020 14:19:11 +0100 Subject: [PATCH 349/668] Run dashboard with internal kubectl if not in path If "kubectl" is not in the PATH, then use the same cached binary as with the "minikube kubectl" command (version matching cluster). --- cmd/minikube/cmd/dashboard.go | 19 ++++++++++++------- cmd/minikube/cmd/kubectl.go | 21 +++++++++++++++------ 2 files changed, 27 insertions(+), 13 deletions(-) diff --git a/cmd/minikube/cmd/dashboard.go b/cmd/minikube/cmd/dashboard.go index c235adbab2..e3c9e93b6b 100644 --- a/cmd/minikube/cmd/dashboard.go +++ b/cmd/minikube/cmd/dashboard.go @@ -63,10 +63,8 @@ var dashboardCmd = &cobra.Command{ } } - kubectl, err := exec.LookPath("kubectl") - if err != nil { - exit.WithCodeT(exit.NoInput, "kubectl not found in PATH, but is required for the dashboard. 
Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/") - } + kubectlVersion := co.Config.KubernetesConfig.KubernetesVersion + var err error // Check dashboard status before enabling it dashboardAddon := assets.Addons["dashboard"] @@ -90,7 +88,7 @@ var dashboardCmd = &cobra.Command{ } out.ErrT(out.Launch, "Launching proxy ...") - p, hostPort, err := kubectlProxy(kubectl, cname) + p, hostPort, err := kubectlProxy(kubectlVersion, cname) if err != nil { exit.WithError("kubectl proxy", err) } @@ -124,10 +122,17 @@ var dashboardCmd = &cobra.Command{ } // kubectlProxy runs "kubectl proxy", returning host:port -func kubectlProxy(path string, contextName string) (*exec.Cmd, string, error) { +func kubectlProxy(kubectlVersion string, contextName string) (*exec.Cmd, string, error) { // port=0 picks a random system port - cmd := exec.Command(path, "--context", contextName, "proxy", "--port=0") + kubectlArgs := []string{"--context", contextName, "proxy", "--port=0"} + + var cmd *exec.Cmd + if kubectl, err := exec.LookPath("kubectl"); err == nil { + cmd = exec.Command(kubectl, kubectlArgs...) + } else if cmd, err = KubectlCommand(kubectlVersion, kubectlArgs...); err != nil { + return nil, "", err + } stdoutPipe, err := cmd.StdoutPipe() if err != nil { diff --git a/cmd/minikube/cmd/kubectl.go b/cmd/minikube/cmd/kubectl.go index f4867b45e0..3eca6dfb06 100644 --- a/cmd/minikube/cmd/kubectl.go +++ b/cmd/minikube/cmd/kubectl.go @@ -43,17 +43,12 @@ minikube kubectl -- get pods --namespace kube-system`, co := mustload.Healthy(ClusterFlagValue()) version := co.Config.KubernetesConfig.KubernetesVersion - if version == "" { - version = constants.DefaultKubernetesVersion - } - - path, err := node.CacheKubectlBinary(version) + c, err := KubectlCommand(version, args...) if err != nil { out.ErrLn("Error caching kubectl: %v", err) } glog.Infof("Running %s %v", path, args) - c := exec.Command(path, args...) 
c.Stdin = os.Stdin c.Stdout = os.Stdout c.Stderr = os.Stderr @@ -70,3 +65,17 @@ minikube kubectl -- get pods --namespace kube-system`, } }, } + +// KubectlCommand will return kubectl command with a version matching the cluster +func KubectlCommand(version string, args ...string) (*exec.Cmd, error) { + if version == "" { + version = constants.DefaultKubernetesVersion + } + + path, err := node.CacheKubectlBinary(version) + if err != nil { + return nil, err + } + + return exec.Command(path, args...), nil +} From d1769f6c31ec7ae75879a4d9b4c42bd0577d341b Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Mon, 30 Mar 2020 11:57:21 -0700 Subject: [PATCH 350/668] docs: Update hugo version used by netlify --- netlify.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/netlify.toml b/netlify.toml index d335d812be..bbf6fcf397 100644 --- a/netlify.toml +++ b/netlify.toml @@ -4,7 +4,7 @@ publish = "site/public/" command = "pwd && cd themes/docsy && git submodule update -f --init && cd ../.. && hugo" [build.environment] -HUGO_VERSION = "0.59.0" +HUGO_VERSION = "0.68.3" [context.production.environment] HUGO_ENV = "production" From a1cc3188bb6e0598de68c72f61f6a0fc9e1d2922 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20F=20Bj=C3=B6rklund?= Date: Mon, 30 Mar 2020 22:16:10 +0200 Subject: [PATCH 351/668] Implement options for the minikube version command Add --short and --output options, just like kubectl --- cmd/minikube/cmd/version.go | 41 ++++++++++++++++++++++++++++++++++--- 1 file changed, 38 insertions(+), 3 deletions(-) diff --git a/cmd/minikube/cmd/version.go b/cmd/minikube/cmd/version.go index 00c61efd88..c56ca7cdea 100644 --- a/cmd/minikube/cmd/version.go +++ b/cmd/minikube/cmd/version.go @@ -17,20 +17,55 @@ limitations under the License. 
package cmd import ( + "encoding/json" "github.com/spf13/cobra" + "gopkg.in/yaml.v2" + "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/version" ) +var ( + versionOutput string + shortVersion bool +) + var versionCmd = &cobra.Command{ Use: "version", Short: "Print the version of minikube", Long: `Print the version of minikube.`, Run: func(command *cobra.Command, args []string) { - out.Ln("minikube version: %v", version.GetVersion()) + minikubeVersion := version.GetVersion() gitCommitID := version.GetGitCommitID() - if gitCommitID != "" { - out.Ln("commit: %v", gitCommitID) + data := map[string]string{ + "minikubeVersion": minikubeVersion, + "commit": gitCommitID, + } + switch versionOutput { + case "": + out.Ln("minikube version: %v", minikubeVersion) + if !shortVersion && gitCommitID != "" { + out.Ln("commit: %v", gitCommitID) + } + case "json": + json, err := json.Marshal(data) + if err != nil { + exit.WithError("version json failure", err) + } + out.Ln(string(json)) + case "yaml": + yaml, err := yaml.Marshal(data) + if err != nil { + exit.WithError("version yaml failure", err) + } + out.Ln(string(yaml)) + default: + exit.WithCodeT(exit.BadUsage, "error: --output must be 'yaml' or 'json'") } }, } + +func init() { + versionCmd.Flags().StringVarP(&versionOutput, "output", "o", "", "One of 'yaml' or 'json'.") + versionCmd.Flags().BoolVar(&shortVersion, "short", false, "Print just the version number.") +} From fd3e0f505a2141ab52864685143b40dcb82a6e72 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 30 Mar 2020 15:31:17 -0700 Subject: [PATCH 352/668] clean up minikube start output --- cmd/minikube/cmd/start.go | 1 + pkg/minikube/node/start.go | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 498849f437..780e2ec766 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -371,6 +371,7 @@ func runStart(cmd *cobra.Command, args 
[]string) { ControlPlane: false, KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, } + out.Ln("") // extra newline for clarity on the command line err := node.Add(&cc, n) if err != nil { exit.WithError("adding node", err) diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 44ff9bb1cc..0945a32ddd 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -66,6 +66,14 @@ const ( // Start spins up a guest and starts the kubernetes node. func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, apiServer bool) *kubeconfig.Settings { + + cp := "" + if apiServer { + cp = "control plane " + } + + out.T(out.ThumbsUp, "Starting {{.controlPlane}}node {{.name}} in cluster {{.cluster}}", out.V{"controlPlane": cp, "name": n.Name, "cluster": cc.Name}) + var kicGroup errgroup.Group if driver.IsKIC(cc.Driver) { beginDownloadKicArtifacts(&kicGroup) From 2b68cb72d8bea912f4fe725b8fee5c22dc845b37 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 30 Mar 2020 15:36:11 -0700 Subject: [PATCH 353/668] glorb --- pkg/minikube/node/start.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 0945a32ddd..3b3ee6890f 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -66,7 +66,6 @@ const ( // Start spins up a guest and starts the kubernetes node. 
func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, apiServer bool) *kubeconfig.Settings { - cp := "" if apiServer { cp = "control plane " From 4b49fb9a81e073b525b4127211e3c8724b622ce8 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Mon, 30 Mar 2020 17:56:55 -0700 Subject: [PATCH 354/668] Update ebpf tools doc --- .../en/docs/Tutorials/ebpf_tools_in_minikube.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/site/content/en/docs/Tutorials/ebpf_tools_in_minikube.md b/site/content/en/docs/Tutorials/ebpf_tools_in_minikube.md index 4931f35d9a..21dcb9b0b0 100644 --- a/site/content/en/docs/Tutorials/ebpf_tools_in_minikube.md +++ b/site/content/en/docs/Tutorials/ebpf_tools_in_minikube.md @@ -22,25 +22,25 @@ This tutorial will cover how to set up your minikube cluster so that you can run First, start minikube: ``` -$ minikube start +$ minikube start --iso-url https://storage.googleapis.com/minikube-performance/minikube.iso ``` You will need to download and extract necessary kernel headers within minikube: ```shell -$ minikube ssh -- curl -Lo /tmp/kernel-headers-linux-4.19.94.tar.lz4 https://storage.googleapis.com/minikube-kernel-headers/kernel-headers-linux-4.19.94.tar.lz4 +minikube ssh -- curl -Lo /tmp/kernel-headers-linux-4.19.94.tar.lz4 https://storage.googleapis.com/minikube-kernel-headers/kernel-headers-linux-4.19.94.tar.lz4 -$ minikube ssh -- sudo mkdir -p /lib/modules/4.19.94/build +minikube ssh -- sudo mkdir -p /lib/modules/4.19.94/build -$ minikube ssh -- sudo tar -I lz4 -C /lib/modules/4.19.94/build -xvf /tmp/kernel-headers-linux-4.19.94.tar.lz4 +minikube ssh -- sudo tar -I lz4 -C /lib/modules/4.19.94/build -xvf /tmp/kernel-headers-linux-4.19.94.tar.lz4 -$ minikube ssh -- rm /tmp/kernel-headers-linux-4.19.94.tar.lz4 +minikube ssh -- rm /tmp/kernel-headers-linux-4.19.94.tar.lz4 ``` You can now run [BCC tools](https://github.com/iovisor/bcc) as a Docker container in minikube: ```shell -$ minikube ssh 
-- docker run -it --rm --privileged -v /lib/modules:/lib/modules:ro -v /usr/src:/usr/src:ro -v /etc/localtime:/etc/localtime:ro --workdir /usr/share/bcc/tools zlim/bcc ./execsnoop +$ minikube ssh -- docker run --rm --privileged -v /lib/modules:/lib/modules:ro -v /usr/src:/usr/src:ro -v /etc/localtime:/etc/localtime:ro --workdir /usr/share/bcc/tools zlim/bcc ./execsnoop Unable to find image 'zlim/bcc:latest' locally From 9d64dfdfe0a8169d9e391eba0f1f3d2a7fcfe67b Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Mon, 30 Mar 2020 19:31:17 -0700 Subject: [PATCH 355/668] Make site mostly compatible with hugo v0.69.0 through criminal hacks --- site/config.toml | 48 +++++++-------------- site/layouts/partials/sidebar-tree.html | 57 +++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 32 deletions(-) create mode 100644 site/layouts/partials/sidebar-tree.html diff --git a/site/config.toml b/site/config.toml index c241a41581..4b9529f580 100644 --- a/site/config.toml +++ b/site/config.toml @@ -29,43 +29,27 @@ pygmentsStyle = "tango" # First one is picked as the Twitter card image if not set on page. #images = ["images/project-illustration.png"] +# Auto-generate the menu +# sectionPagesMenu = "main" + # Configure how URLs look like per section. 
[permalinks] blog = "/:section/:year/:month/:day/:slug/" -[module] - [[module.mounts]] - source = "../deploy/addons/gvisor/" - target = "content/gvisor/" - [[module.mounts]] - source = "../deploy/addons/helm-tiller/" - target = "content/helm-tiller/" - [[module.mounts]] - source = "../deploy/addons/istio/" - target = "content/istio/" - [[module.mounts]] - source = "../deploy/addons/ingress-dns/" - target = "content/ingress-dns/" - [[module.mounts]] - source = "../deploy/addons/storage-provisioner-gluster/" - target = "content/storage-provisioner-gluster/" - [[module.mounts]] - source = "../deploy/addons/layouts/" - target = "layouts" +[markup] + [markup.highlight] + codeFences = true + hl_Lines = "" + lineNoStart = 1 + lineNos = false + lineNumbersInTable = true + noClasses = true + style = "vs" + tabWidth = 4 - [[module.mounts]] - source = "content/en" - target = "content" - [[module.mounts]] - source = "layouts" - target = "layouts" - -## Configuration for BlackFriday markdown parser: https://github.com/russross/blackfriday -[blackfriday] -plainIDAnchors = true -hrefTargetBlank = true -angledQuotes = false -latexDashes = true +# allow html in markdown +[markup.goldmark.renderer] + unsafe=true # Image processing configuration. [imaging] diff --git a/site/layouts/partials/sidebar-tree.html b/site/layouts/partials/sidebar-tree.html new file mode 100644 index 0000000000..9b4dfa3503 --- /dev/null +++ b/site/layouts/partials/sidebar-tree.html @@ -0,0 +1,57 @@ +{{/* We cache this partial for bigger sites and set the active class client side. */}} +{{ $shouldDelayActive := ge (len .Site.Pages) 2000 }} +
+ {{ if not .Site.Params.ui.sidebar_search_disable }} + + {{ end }} + +
+{{ define "section-tree-nav-section" }} +{{ $s := .section }} +{{ $p := .page }} +{{ $shouldDelayActive := .delayActive }} +{{ $active := eq $p.CurrentSection $s }} +{{ $show := or (and (not $p.Site.Params.ui.sidebar_menu_compact) ($p.IsAncestor $s)) ($p.IsDescendant $s) }} + +{{/* minikube hack: Override $show due to a Hugo upgrade bug */}} +{{ $show = true }} +{{/* end minikube hack */}} + +{{ $sid := $s.RelPermalink | anchorize }} +
    +
  • + {{ $s.LinkTitle }} +
  • +
      +
    • + {{ $pages := where (union $s.Pages $s.Sections).ByWeight ".Params.toc_hide" "!=" true }} + {{ $pages := $pages | first 50 }} + {{ range $pages }} + {{ if .IsPage }} + {{ $mid := printf "m-%s" (.RelPermalink | anchorize) }} + + {{/* minikube hack: Override $active due to a Hugo upgrade bug */}} + {{ if $active }} + {{ $activePage := eq . $p }} + {{ .LinkTitle }} + {{ end }} + {{/* end minikube hack */}} + {{ else }} + {{ template "section-tree-nav-section" (dict "page" $p "section" .) }} + {{ end }} + {{ end }} +
    • +
    +
+{{ end }} From f956f6a69ab60b49d9aa1d8a646d93bbd70b1693 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Mon, 30 Mar 2020 19:36:12 -0700 Subject: [PATCH 356/668] Cleaner diff --- site/config.toml | 3 --- site/layouts/partials/sidebar-tree.html | 4 ++-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/site/config.toml b/site/config.toml index 4b9529f580..e0c6e2a4bc 100644 --- a/site/config.toml +++ b/site/config.toml @@ -29,9 +29,6 @@ pygmentsStyle = "tango" # First one is picked as the Twitter card image if not set on page. #images = ["images/project-illustration.png"] -# Auto-generate the menu -# sectionPagesMenu = "main" - # Configure how URLs look like per section. [permalinks] blog = "/:section/:year/:month/:day/:slug/" diff --git a/site/layouts/partials/sidebar-tree.html b/site/layouts/partials/sidebar-tree.html index 9b4dfa3503..ebf99e2a74 100644 --- a/site/layouts/partials/sidebar-tree.html +++ b/site/layouts/partials/sidebar-tree.html @@ -1,5 +1,5 @@ -{{/* We cache this partial for bigger sites and set the active class client side. */}} -{{ $shouldDelayActive := ge (len .Site.Pages) 2000 }} +{{/* minikube hack: temporarily forked from docsy/layouts/partials/sidebar-tree.html due to hugo v0.69 compatibility issues */}} +
{{ if not .Site.Params.ui.sidebar_search_disable }}
+ + + {{ define "section-tree-nav-section" }} {{ $s := .section }} {{ $p := .page }} {{ $shouldDelayActive := .delayActive }} -{{ $active := eq $p.CurrentSection $s }} -{{ $show := or (and (not $p.Site.Params.ui.sidebar_menu_compact) ($p.IsAncestor $s)) ($p.IsDescendant $s) }} +{{ $activeSection := eq $p.CurrentSection $s }} -{{/* minikube hack: Override $show due to a Hugo upgrade bug */}} -{{ $show = true }} +{{/* minikube hack: Override $showSection due to a Hugo upgrade bug */}} +{{ $showSection := false }} +{{ $expandSection := false }} +{{ $sid := $s.RelPermalink | anchorize }} +{{ $sectionParent := $s.Parent.Title | anchorize }} +{{ $csid := $p.CurrentSection.Title | anchorize }} + +{{ if $p.IsDescendant $s }} + + {{ $showSection = true }} +{{ else if eq $sectionParent "minikube" }} + + {{ $showSection = true }} +{{ else if eq $sectionParent "welcome" }} + + {{ $showSection = true }} +{{ else if eq $sectionParent "handbook" }} + + {{ $showSection = true }} +{{ else if eq $p.CurrentSection $s.Parent }} + + {{ $showSection = true }} +{{ else if $p.Parent.IsAncestor $s }} + + {{ if eq $s $p.CurrentSection }} + {{ $showSection = true }} + {{ end }} + +{{ end }} + + {{/* end minikube hack */}} {{ $sid := $s.RelPermalink | anchorize }} -