diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml new file mode 100644 index 0000000000..a8cc54413e --- /dev/null +++ b/.github/workflows/master.yml @@ -0,0 +1,537 @@ +name: MasterCI +on: + push: + branches: + - master + paths: + - '**.go' +env: + GOPROXY: https://proxy.golang.org +jobs: + # Runs before all other jobs + # builds the minikube binaries + build_minikube: + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + - name: Download Dependencies + run : go mod download + - name: Build Binaries + run : | + make minikube-linux-amd64 + make e2e-linux-amd64 + cp -r test/integration/testdata ./out + whoami + echo github ref $GITHUB_REF + echo workflow $GITHUB_WORKFLOW + echo home $HOME + echo event name $GITHUB_EVENT_NAME + echo workspace $GITHUB_WORKSPACE + echo "end of debug stuff" + echo $(which jq) + - uses: actions/upload-artifact@v1 + with: + name: minikube_binaries + path: out + lint: + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + - name: Install libvirt + run : | + sudo apt-get update + sudo apt-get install -y libvirt-dev + - name: Download Dependencies + run : go mod download + - name: Lint + env: + TESTSUITE: lintall + run : make test + continue-on-error: false + unit_test: + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + - name: Install libvirt + run : | + sudo apt-get update + sudo apt-get install -y libvirt-dev + - name: Download Dependencies + run : go mod download + - name: Unit Test + env: + TESTSUITE: unittest + run : + make test + continue-on-error: false + # Run the following integration tests after the build_minikube + # They will run in parallel and use the binaries in previous step + functional_test_docker_ubuntu: + needs: [build_minikube] + env: + TIME_ELAPSED: time + JOB_NAME: "functional_test_docker_ubuntu" + GOPOGH_RESULT: "" + SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 + runs-on: ubuntu-18.04 + steps: + - name: Install kubectl + 
shell: bash + run: | + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl + kubectl version --client=true + - name: Docker Info + shell: bash + run: | + echo "--------------------------" + docker version || true + echo "--------------------------" + docker info || true + echo "--------------------------" + docker system df || true + echo "--------------------------" + docker system info || true + echo "--------------------------" + docker ps || true + echo "--------------------------" + - name: Install lz4 + shell: bash + run: | + sudo apt-get update -qq + sudo apt-get -qq -y install liblz4-tool + - name: Install gopogh + shell: bash + run: | + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64 + sudo install gopogh-linux-amd64 /usr/local/bin/gopogh + - name: Download Binaries + uses: actions/download-artifact@v1 + with: + name: minikube_binaries + - name: Run Integration Test + continue-on-error: true + # bash {0} to allow test to continue to next step. 
in case of + shell: bash {0} + run: | + cd minikube_binaries + mkdir -p report + mkdir -p testhome + chmod a+x e2e-* + chmod a+x minikube-* + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + START_TIME=$(date -u +%s) + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.run TestFunctional -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + END_TIME=$(date -u +%s) + TIME_ELAPSED=$(($END_TIME-$START_TIME)) + min=$((${TIME_ELAPSED}/60)) + sec=$((${TIME_ELAPSED}%60)) + TIME_ELAPSED="${min} min $sec seconds " + echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED} + - name: Generate HTML Report + shell: bash + run: | + cd minikube_binaries + export PATH=${PATH}:`go env GOPATH`/bin + go tool test2json -t < ./report/testout.txt > ./report/testout.json || true + STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true + echo status: ${STAT} + FailNum=$(echo $STAT | jq '.NumberOfFail') + TestsNum=$(echo $STAT | jq '.NumberOfTests') + GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}" + echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT} + echo ::set-env name=STAT::${STAT} + - uses: actions/upload-artifact@v1 + with: + name: functional_test_docker_ubuntu + path: minikube_binaries/report + - name: The End Result functional_test_docker_ubuntu + shell: bash + run: | + echo ${GOPOGH_RESULT} + numFail=$(echo $STAT | jq '.NumberOfFail') + echo "----------------${numFail} Failures----------------------------" + echo $STAT | jq '.FailedTests' || true + echo "-------------------------------------------------------" + numPass=$(echo $STAT | jq '.NumberOfPass') + echo "*** $numPass Passed ***" + if [ "$numFail" -gt 0 
];then echo "*** $numFail Failed ***";exit 2;fi + addons_certs_tests_docker_ubuntu: + runs-on: ubuntu-18.04 + env: + TIME_ELAPSED: time + JOB_NAME: "addons_certs_tests_docker_ubuntu" + GOPOGH_RESULT: "" + SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 + needs: [build_minikube] + steps: + - name: Install kubectl + shell: bash + run: | + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl + kubectl version --client=true + - name: Install lz4 + shell: bash + run: | + sudo apt-get update -qq + sudo apt-get -qq -y install liblz4-tool + - name: Docker Info + shell: bash + run: | + echo "--------------------------" + docker version || true + echo "--------------------------" + docker info || true + echo "--------------------------" + docker system df || true + echo "--------------------------" + docker system info || true + echo "--------------------------" + docker ps || true + echo "--------------------------" + - name: Install gopogh + shell: bash + run: | + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64 + sudo install gopogh-linux-amd64 /usr/local/bin/gopogh + - name: Download Binaries + uses: actions/download-artifact@v1 + with: + name: minikube_binaries + - name: Run Integration Test + continue-on-error: true + # bash {0} to allow test to continue to next step. 
in case of + shell: bash {0} + run: | + cd minikube_binaries + mkdir -p report + mkdir -p testhome + chmod a+x e2e-* + chmod a+x minikube-* + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + START_TIME=$(date -u +%s) + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestAddons|TestCertOptions)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + END_TIME=$(date -u +%s) + TIME_ELAPSED=$(($END_TIME-$START_TIME)) + min=$((${TIME_ELAPSED}/60)) + sec=$((${TIME_ELAPSED}%60)) + TIME_ELAPSED="${min} min $sec seconds " + echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED} + - name: Generate HTML Report + shell: bash + run: | + cd minikube_binaries + export PATH=${PATH}:`go env GOPATH`/bin + go tool test2json -t < ./report/testout.txt > ./report/testout.json || true + STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true + echo status: ${STAT} + FailNum=$(echo $STAT | jq '.NumberOfFail') + TestsNum=$(echo $STAT | jq '.NumberOfTests') + GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}" + echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT} + echo ::set-env name=STAT::${STAT} + - uses: actions/upload-artifact@v1 + with: + name: addons_certs_tests_docker_ubuntu + path: minikube_binaries/report + - name: The End Result - addons_certs_tests_docker_ubuntu + shell: bash + run: | + echo ${GOPOGH_RESULT} + numFail=$(echo $STAT | jq '.NumberOfFail') + echo "----------------${numFail} Failures----------------------------" + echo $STAT | jq '.FailedTests' || true + echo "-------------------------------------------------------" + numPass=$(echo $STAT | jq '.NumberOfPass') + echo "*** $numPass Passed ***" + if [ 
"$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi + multinode_pause_tests_docker_ubuntu: + runs-on: ubuntu-18.04 + env: + TIME_ELAPSED: time + JOB_NAME: "multinode_pause_tests_docker_ubuntu" + GOPOGH_RESULT: "" + SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 + needs: [build_minikube] + steps: + - name: Install kubectl + shell: bash + run: | + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl + kubectl version --client=true + - name: Install lz4 + shell: bash + run: | + sudo apt-get update -qq + sudo apt-get -qq -y install liblz4-tool + - name: Docker Info + shell: bash + run: | + echo "--------------------------" + docker version || true + echo "--------------------------" + docker info || true + echo "--------------------------" + docker system df || true + echo "--------------------------" + docker system info || true + echo "--------------------------" + docker ps || true + echo "--------------------------" + - name: Install gopogh + shell: bash + run: | + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64 + sudo install gopogh-linux-amd64 /usr/local/bin/gopogh + - name: Download Binaries + uses: actions/download-artifact@v1 + with: + name: minikube_binaries + - name: Run Integration Test + continue-on-error: true + # bash {0} to allow test to continue to next step. 
in case of + shell: bash {0} + run: | + cd minikube_binaries + mkdir -p report + mkdir -p testhome + chmod a+x e2e-* + chmod a+x minikube-* + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + START_TIME=$(date -u +%s) + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestPause|TestMultiNode)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + END_TIME=$(date -u +%s) + TIME_ELAPSED=$(($END_TIME-$START_TIME)) + min=$((${TIME_ELAPSED}/60)) + sec=$((${TIME_ELAPSED}%60)) + TIME_ELAPSED="${min} min $sec seconds " + echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED} + - name: Generate HTML Report + shell: bash + run: | + cd minikube_binaries + export PATH=${PATH}:`go env GOPATH`/bin + go tool test2json -t < ./report/testout.txt > ./report/testout.json || true + STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true + echo status: ${STAT} + FailNum=$(echo $STAT | jq '.NumberOfFail') + TestsNum=$(echo $STAT | jq '.NumberOfTests') + GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}" + echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT} + echo ::set-env name=STAT::${STAT} + - uses: actions/upload-artifact@v1 + with: + name: multinode_pause_tests_docker_ubuntu + path: minikube_binaries/report + - name: The End Result - multinode_pause_tests_docker_ubuntu + shell: bash + run: | + echo ${GOPOGH_RESULT} + numFail=$(echo $STAT | jq '.NumberOfFail') + echo "----------------${numFail} Failures----------------------------" + echo $STAT | jq '.FailedTests' || true + echo "-------------------------------------------------------" + numPass=$(echo $STAT | jq '.NumberOfPass') + echo "*** $numPass Passed ***" + 
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi + preload_docker_flags_tests_docker_ubuntu: + runs-on: ubuntu-18.04 + env: + TIME_ELAPSED: time + JOB_NAME: "preload_docker_flags_tests_docker_ubuntu" + GOPOGH_RESULT: "" + SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 + needs: [build_minikube] + steps: + - name: Install kubectl + shell: bash + run: | + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl + kubectl version --client=true + - name: Install lz4 + shell: bash + run: | + sudo apt-get update -qq + sudo apt-get -qq -y install liblz4-tool + - name: Docker Info + shell: bash + run: | + echo "--------------------------" + docker version || true + echo "--------------------------" + docker info || true + echo "--------------------------" + docker system df || true + echo "--------------------------" + docker system info || true + echo "--------------------------" + docker ps || true + echo "--------------------------" + - name: Install gopogh + shell: bash + run: | + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64 + sudo install gopogh-linux-amd64 /usr/local/bin/gopogh + - name: Download Binaries + uses: actions/download-artifact@v1 + with: + name: minikube_binaries + - name: Run Integration Test + continue-on-error: true + # bash {0} to allow test to continue to next step. 
in case of + shell: bash {0} + run: | + cd minikube_binaries + mkdir -p report + mkdir -p testhome + chmod a+x e2e-* + chmod a+x minikube-* + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + START_TIME=$(date -u +%s) + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestPreload|TestDockerFlags)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + END_TIME=$(date -u +%s) + TIME_ELAPSED=$(($END_TIME-$START_TIME)) + min=$((${TIME_ELAPSED}/60)) + sec=$((${TIME_ELAPSED}%60)) + TIME_ELAPSED="${min} min $sec seconds " + echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED} + - name: Generate HTML Report + shell: bash + run: | + cd minikube_binaries + export PATH=${PATH}:`go env GOPATH`/bin + go tool test2json -t < ./report/testout.txt > ./report/testout.json || true + STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true + echo status: ${STAT} + FailNum=$(echo $STAT | jq '.NumberOfFail') + TestsNum=$(echo $STAT | jq '.NumberOfTests') + GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}" + echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT} + echo ::set-env name=STAT::${STAT} + - uses: actions/upload-artifact@v1 + with: + name: preload_docker_flags_tests_docker_ubuntu + path: minikube_binaries/report + - name: The End Result - preload_docker_flags_tests_docker_ubuntu + shell: bash + run: | + echo ${GOPOGH_RESULT} + numFail=$(echo $STAT | jq '.NumberOfFail') + echo "----------------${numFail} Failures----------------------------" + echo $STAT | jq '.FailedTests' || true + echo "-------------------------------------------------------" + numPass=$(echo $STAT | jq '.NumberOfPass') + echo "*** $numPass 
Passed ***" + if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi + functional_baremetal_ubuntu18_04: + needs: [build_minikube] + env: + TIME_ELAPSED: time + JOB_NAME: "functional_baremetal_ubuntu18_04" + GOPOGH_RESULT: "" + SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 + runs-on: ubuntu-18.04 + steps: + - name: Install kubectl + shell: bash + run: | + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl + kubectl version --client=true + # conntrack is required for kubernetes 1.18 and higher + # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon + - name: Install tools for none + shell: bash + run: | + sudo apt-get update -qq + sudo apt-get -qq -y install conntrack + sudo apt-get -qq -y install socat + VERSION="v1.17.0" + curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz + sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin + - name: Install gopogh + shell: bash + run: | + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64 + sudo install gopogh-linux-amd64 /usr/local/bin/gopogh + - name: Download Binaries + uses: actions/download-artifact@v1 + with: + name: minikube_binaries + - name: Run Integration Test + continue-on-error: true + # bash {0} to allow test to continue to next step. 
in case of + shell: bash {0} + run: | + cd minikube_binaries + mkdir -p report + mkdir -p testhome + chmod a+x e2e-* + chmod a+x minikube-* + START_TIME=$(date -u +%s) + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=35m -test.run TestFunctional -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + END_TIME=$(date -u +%s) + TIME_ELAPSED=$(($END_TIME-$START_TIME)) + min=$((${TIME_ELAPSED}/60)) + sec=$((${TIME_ELAPSED}%60)) + TIME_ELAPSED="${min} min $sec seconds " + echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED} + - name: Generate HTML Report + shell: bash + run: | + cd minikube_binaries + export PATH=${PATH}:`go env GOPATH`/bin + go tool test2json -t < ./report/testout.txt > ./report/testout.json || true + STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true + echo status: ${STAT} + FailNum=$(echo $STAT | jq '.NumberOfFail') + TestsNum=$(echo $STAT | jq '.NumberOfTests') + GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}" + echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT} + echo ::set-env name=STAT::${STAT} + - uses: actions/upload-artifact@v1 + with: + name: none_ubuntu18_04 + path: minikube_binaries/report + - name: The End Result - None on Ubuntu 18:04 + shell: bash + run: | + echo ${GOPOGH_RESULT} + numFail=$(echo $STAT | jq '.NumberOfFail') + echo "----------------${numFail} Failures----------------------------" + echo $STAT | jq '.FailedTests' || true + echo "-------------------------------------------------------" + numPass=$(echo $STAT | jq '.NumberOfPass') + echo "*** $numPass Passed ***" + if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi + # After all integration tests finished + # collect all the reports and upload them + 
upload_all_reports: + if: always() + needs: [functional_test_docker_ubuntu, addons_certs_tests_docker_ubuntu, multinode_pause_tests_docker_ubuntu, preload_docker_flags_tests_docker_ubuntu, functional_baremetal_ubuntu18_04] + runs-on: ubuntu-18.04 + steps: + - name: download all reports + uses: actions/download-artifact@v2-preview + - name: upload all reports + shell: bash {0} + continue-on-error: true + run: | + mkdir -p all_reports + ls -lah + cp -r ./functional_test_docker_ubuntu ./all_reports/ + cp -r ./addons_certs_tests_docker_ubuntu ./all_reports/ + cp -r ./multinode_pause_tests_docker_ubuntu ./all_reports/ + cp -r ./preload_docker_flags_tests_docker_ubuntu ./all_reports/ + cp -r ./functional_baremetal_ubuntu18_04 ./all_reports/ + - uses: actions/upload-artifact@v1 + with: + name: all_reports + path: all_reports \ No newline at end of file diff --git a/.github/workflows/main.yml b/.github/workflows/pr.yml similarity index 65% rename from .github/workflows/main.yml rename to .github/workflows/pr.yml index 68a9561d78..de141079a8 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/pr.yml @@ -1,15 +1,14 @@ name: CI -on: [pull_request] +on: + pull_request: + paths: + - '**.go' env: GOPROXY: https://proxy.golang.org jobs: # Runs before all other jobs # builds the minikube binaries build_minikube: - env: - TIME_ELAPSED: time - JOB_NAME: "Docker_Ubuntu_16_04" - GOPOGH_RESULT: "" runs-on: ubuntu-18.04 steps: - uses: actions/checkout@v2 @@ -19,8 +18,6 @@ jobs: run : | make minikube-linux-amd64 make e2e-linux-amd64 - make minikube-windows-amd64.exe - make e2e-windows-amd64.exe cp -r test/integration/testdata ./out whoami echo github ref $GITHUB_REF @@ -35,10 +32,6 @@ jobs: name: minikube_binaries path: out lint: - env: - TIME_ELAPSED: time - JOB_NAME: "lint" - GOPOGH_RESULT: "" runs-on: ubuntu-18.04 steps: - uses: actions/checkout@v2 @@ -54,10 +47,6 @@ jobs: run : make test continue-on-error: false unit_test: - env: - TIME_ELAPSED: time - JOB_NAME: 
"unit_test" - GOPOGH_RESULT: "" runs-on: ubuntu-18.04 steps: - uses: actions/checkout@v2 @@ -74,15 +63,15 @@ jobs: make test continue-on-error: false # Run the following integration tests after the build_minikube - # They will run in parallel and use the binaries in previous step - docker_ubuntu_16_04: + # They will run in parallel and use the binaries in previous step + functional_test_docker_ubuntu: needs: [build_minikube] env: TIME_ELAPSED: time - JOB_NAME: "Docker_Ubuntu_16_04" + JOB_NAME: "functional_test_docker_ubuntu" GOPOGH_RESULT: "" SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 - runs-on: ubuntu-16.04 + runs-on: ubuntu-18.04 steps: - name: Install kubectl shell: bash @@ -112,7 +101,7 @@ jobs: - name: Install gopogh shell: bash run: | - curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64 + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64 sudo install gopogh-linux-amd64 /usr/local/bin/gopogh - name: Download Binaries uses: actions/download-artifact@v1 @@ -128,8 +117,10 @@ jobs: mkdir -p testhome chmod a+x e2e-* chmod a+x minikube-* + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.timeout=80m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.run TestFunctional -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) @@ -151,9 +142,9 @@ jobs: echo ::set-env name=STAT::${STAT} - uses: 
actions/upload-artifact@v1 with: - name: docker_ubuntu_16_04 + name: functional_test_docker_ubuntu path: minikube_binaries/report - - name: The End Result Docker on ubuntu 16:04 + - name: The End Result functional_test_docker_ubuntu shell: bash run: | echo ${GOPOGH_RESULT} @@ -164,11 +155,11 @@ jobs: numPass=$(echo $STAT | jq '.NumberOfPass') echo "*** $numPass Passed ***" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi - docker_ubuntu_18_04: + addons_certs_tests_docker_ubuntu: runs-on: ubuntu-18.04 env: TIME_ELAPSED: time - JOB_NAME: "Docker_Ubuntu_18_04" + JOB_NAME: "addons_certs_tests_docker_ubuntu" GOPOGH_RESULT: "" SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 needs: [build_minikube] @@ -201,7 +192,7 @@ jobs: - name: Install gopogh shell: bash run: | - curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64 + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64 sudo install gopogh-linux-amd64 /usr/local/bin/gopogh - name: Download Binaries uses: actions/download-artifact@v1 @@ -217,8 +208,10 @@ jobs: mkdir -p testhome chmod a+x e2e-* chmod a+x minikube-* + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.timeout=80m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestAddons|TestCertOptions)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) @@ -240,9 +233,9 @@ jobs: echo 
::set-env name=STAT::${STAT} - uses: actions/upload-artifact@v1 with: - name: docker_ubuntu_18_04 + name: addons_certs_tests_docker_ubuntu path: minikube_binaries/report - - name: The End Result - Docker On Ubuntu 18:04 + - name: The End Result - addons_certs_tests_docker_ubuntu shell: bash run: | echo ${GOPOGH_RESULT} @@ -253,78 +246,14 @@ jobs: numPass=$(echo $STAT | jq '.NumberOfPass') echo "*** $numPass Passed ***" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi - docker_on_windows: - needs: [build_minikube] + multinode_pause_tests_docker_ubuntu: + runs-on: ubuntu-18.04 env: TIME_ELAPSED: time - JOB_NAME: "Docker_on_windows" - COMMIT_STATUS: "" - runs-on: windows-latest - steps: - - uses: actions/checkout@v2 - - name: Docker Info - shell: bash - run: | - docker info || true - docker version || true - docker ps || true - - name: Download gopogh - run: | - curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh.exe - shell: bash - - name: Download binaries - uses: actions/download-artifact@v1 - with: - name: minikube_binaries - - name: run integration test - continue-on-error: true - run: | - set +euo pipefail - mkdir -p report - mkdir -p testhome - START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome minikube_binaries/e2e-windows-amd64.exe -minikube-start-args=--vm-driver=docker -binary=minikube_binaries/minikube-windows-amd64.exe -test.v -test.timeout=65m 2>&1 | tee ./report/testout.txt - END_TIME=$(date -u +%s) - TIME_ELAPSED=$(($END_TIME-$START_TIME)) - min=$((${TIME_ELAPSED}/60)) - sec=$((${TIME_ELAPSED}%60)) - TIME_ELAPSED="${min} min $sec seconds" - echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED} - shell: bash - - name: Generate html report - run: | - go tool test2json -t < ./report/testout.txt > ./report/testout.json || true - STAT=$(${GITHUB_WORKSPACE}/gopogh.exe -in ./report/testout.json -out ./report/testout.html -name " $GITHUB_REF" -repo "${JOB_NAME} ${GITHUB_REF} 
${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true - echo status: ${STAT} - FailNum=$(echo $STAT | jq '.NumberOfFail') - TestsNum=$(echo $STAT | jq '.NumberOfTests') - GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}" - echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT} - echo ::set-env name=STAT::${STAT} - shell: bash - - uses: actions/upload-artifact@v1 - with: - name: docker_on_windows - path: report - - name: The End Result - run: | - echo ${GOPOGH_RESULT} - numFail=$(echo $STAT | jq '.NumberOfFail') - echo "----------------${numFail} Failures----------------------------" - echo $STAT | jq '.FailedTests' || true - echo "--------------------------------------------" - numPass=$(echo $STAT | jq '.NumberOfPass') - echo "*** $numPass Passed ***" - if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi - shell: bash - none_ubuntu16_04: - needs: [build_minikube] - env: - TIME_ELAPSED: time - JOB_NAME: "None_Ubuntu_16_04" + JOB_NAME: "multinode_pause_tests_docker_ubuntu" GOPOGH_RESULT: "" SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 - runs-on: ubuntu-16.04 + needs: [build_minikube] steps: - name: Install kubectl shell: bash @@ -332,21 +261,29 @@ jobs: curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl kubectl version --client=true - # conntrack is required for kubernetes 1.18 and higher - # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon - - name: Install tools for none + - name: Install lz4 shell: bash run: | sudo apt-get update -qq - sudo apt-get -qq -y install conntrack - sudo apt-get -qq -y install socat - VERSION="v1.17.0" - curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz - sudo tar zxvf 
crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin + sudo apt-get -qq -y install liblz4-tool + - name: Docker Info + shell: bash + run: | + echo "--------------------------" + docker version || true + echo "--------------------------" + docker info || true + echo "--------------------------" + docker system df || true + echo "--------------------------" + docker system info || true + echo "--------------------------" + docker ps || true + echo "--------------------------" - name: Install gopogh shell: bash run: | - curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64 + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64 sudo install gopogh-linux-amd64 /usr/local/bin/gopogh - name: Download Binaries uses: actions/download-artifact@v1 @@ -362,8 +299,10 @@ jobs: mkdir -p testhome chmod a+x e2e-* chmod a+x minikube-* + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=35m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestPause|TestMultiNode)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) @@ -385,9 +324,9 @@ jobs: echo ::set-env name=STAT::${STAT} - uses: actions/upload-artifact@v1 with: - name: none_ubuntu16_04 + name: multinode_pause_tests_docker_ubuntu path: minikube_binaries/report - - name: The End Result - None On Ubuntu 16:04 + - name: The End Result - multinode_pause_tests_docker_ubuntu shell: bash run: | echo 
${GOPOGH_RESULT} @@ -398,11 +337,102 @@ jobs: numPass=$(echo $STAT | jq '.NumberOfPass') echo "*** $numPass Passed ***" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi - none_ubuntu18_04: + preload_docker_flags_tests_docker_ubuntu: + runs-on: ubuntu-18.04 + env: + TIME_ELAPSED: time + JOB_NAME: "preload_docker_flags_tests_docker_ubuntu" + GOPOGH_RESULT: "" + SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 + needs: [build_minikube] + steps: + - name: Install kubectl + shell: bash + run: | + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl + kubectl version --client=true + - name: Install lz4 + shell: bash + run: | + sudo apt-get update -qq + sudo apt-get -qq -y install liblz4-tool + - name: Docker Info + shell: bash + run: | + echo "--------------------------" + docker version || true + echo "--------------------------" + docker info || true + echo "--------------------------" + docker system df || true + echo "--------------------------" + docker system info || true + echo "--------------------------" + docker ps || true + echo "--------------------------" + - name: Install gopogh + shell: bash + run: | + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64 + sudo install gopogh-linux-amd64 /usr/local/bin/gopogh + - name: Download Binaries + uses: actions/download-artifact@v1 + with: + name: minikube_binaries + - name: Run Integration Test + continue-on-error: true + # bash {0} to allow test to continue to next step. 
in case of + shell: bash {0} + run: | + cd minikube_binaries + mkdir -p report + mkdir -p testhome + chmod a+x e2e-* + chmod a+x minikube-* + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + START_TIME=$(date -u +%s) + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestPreload|TestDockerFlags)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + END_TIME=$(date -u +%s) + TIME_ELAPSED=$(($END_TIME-$START_TIME)) + min=$((${TIME_ELAPSED}/60)) + sec=$((${TIME_ELAPSED}%60)) + TIME_ELAPSED="${min} min $sec seconds " + echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED} + - name: Generate HTML Report + shell: bash + run: | + cd minikube_binaries + export PATH=${PATH}:`go env GOPATH`/bin + go tool test2json -t < ./report/testout.txt > ./report/testout.json || true + STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true + echo status: ${STAT} + FailNum=$(echo $STAT | jq '.NumberOfFail') + TestsNum=$(echo $STAT | jq '.NumberOfTests') + GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}" + echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT} + echo ::set-env name=STAT::${STAT} + - uses: actions/upload-artifact@v1 + with: + name: preload_docker_flags_tests_docker_ubuntu + path: minikube_binaries/report + - name: The End Result - preload_docker_flags_tests_docker_ubuntu + shell: bash + run: | + echo ${GOPOGH_RESULT} + numFail=$(echo $STAT | jq '.NumberOfFail') + echo "----------------${numFail} Failures----------------------------" + echo $STAT | jq '.FailedTests' || true + echo "-------------------------------------------------------" + numPass=$(echo $STAT | jq '.NumberOfPass') + echo "*** $numPass 
Passed ***" + if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi + functional_baremetal_ubuntu18_04: needs: [build_minikube] env: TIME_ELAPSED: time - JOB_NAME: "None_Ubuntu_18_04" + JOB_NAME: "functional_baremetal_ubuntu18_04" GOPOGH_RESULT: "" SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-18.04 @@ -427,7 +457,7 @@ jobs: - name: Install gopogh shell: bash run: | - curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64 + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64 sudo install gopogh-linux-amd64 /usr/local/bin/gopogh - name: Download Binaries uses: actions/download-artifact@v1 @@ -444,7 +474,7 @@ jobs: chmod a+x e2e-* chmod a+x minikube-* START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=35m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=35m -test.v -timeout-multiplier=1.5 -test.run TestFunctional -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) @@ -479,153 +509,26 @@ jobs: numPass=$(echo $STAT | jq '.NumberOfPass') echo "*** $numPass Passed ***" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi - podman_ubuntu_18_04_experimental: - needs: [build_minikube] - env: - TIME_ELAPSED: time - JOB_NAME: "Podman_Ubuntu_18_04" - GOPOGH_RESULT: "" - SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 - runs-on: ubuntu-18.04 - steps: - - name: Install kubectl - shell: bash - run: | - curl -LO 
https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl - sudo install kubectl /usr/local/bin/kubectl - kubectl version --client=true - - name: Install podman - shell: bash - run: | - . /etc/os-release - sudo sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list" - wget -q https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/xUbuntu_${VERSION_ID}/Release.key -O- | sudo apt-key add - - sudo apt-key add - < Release.key || true - sudo apt-get update -qq - sudo apt-get -qq -y install podman - sudo podman version || true - sudo podman info || true - - name: Install gopogh - shell: bash - run: | - curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64 - sudo install gopogh-linux-amd64 /usr/local/bin/gopogh - - name: Download binaries - uses: actions/download-artifact@v1 - with: - name: minikube_binaries - - name: Run Integration Test - continue-on-error: true - # bash {0} to allow test to continue to next step. 
in case of - shell: bash {0} - run: | - cd minikube_binaries - mkdir -p report - mkdir -p testhome - chmod a+x e2e-* - chmod a+x minikube-* - START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=podman -test.timeout=30m -test.v -timeout-multiplier=1 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt - END_TIME=$(date -u +%s) - TIME_ELAPSED=$(($END_TIME-$START_TIME)) - min=$((${TIME_ELAPSED}/60)) - sec=$((${TIME_ELAPSED}%60)) - TIME_ELAPSED="${min} min $sec seconds " - echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED} - - name: Generate HTML Report - shell: bash - run: | - cd minikube_binaries - export PATH=${PATH}:`go env GOPATH`/bin - go tool test2json -t < ./report/testout.txt > ./report/testout.json || true - STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true - echo status: ${STAT} - FailNum=$(echo $STAT | jq '.NumberOfFail') - TestsNum=$(echo $STAT | jq '.NumberOfTests') - GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}" - echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT} - echo ::set-env name=STAT::${STAT} - - uses: actions/upload-artifact@v1 - with: - name: podman_ubuntu_18_04 - path: minikube_binaries/report - - name: The End Result - Podman On Ubuntu 18:04 - shell: bash - run: | - echo ${GOPOGH_RESULT} - numFail=$(echo $STAT | jq '.NumberOfFail') - echo "----------------${numFail} Failures----------------------------" - echo $STAT | jq '.FailedTests' || true - echo "-------------------------------------------------------" - numPass=$(echo $STAT | jq '.NumberOfPass') - echo "*** $numPass Passed ***" - if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi - # After all 4 integration tests finished - # collect all the reports and upload + # After all integration tests 
finished + # collect all the reports and upload them upload_all_reports: if: always() - needs: [docker_ubuntu_16_04,docker_ubuntu_18_04,none_ubuntu16_04,none_ubuntu18_04,podman_ubuntu_18_04_experimental] + needs: [functional_test_docker_ubuntu, addons_certs_tests_docker_ubuntu, multinode_pause_tests_docker_ubuntu, preload_docker_flags_tests_docker_ubuntu, functional_baremetal_ubuntu18_04] runs-on: ubuntu-18.04 steps: - - name: Download Results docker_ubuntu_16_04 - uses: actions/download-artifact@v1 - with: - name: docker_ubuntu_16_04 - - name: cp docker_ubuntu_16_04 to all_report - continue-on-error: true + - name: download all reports + uses: actions/download-artifact@v2-preview + - name: upload all reports shell: bash {0} - run: | - mkdir -p all_reports - cp -r docker_ubuntu_16_04 ./all_reports/ - - name: Download Results docker_ubuntu_18_04 - uses: actions/download-artifact@v1 - with: - name: docker_ubuntu_18_04 - - name: cp docker_ubuntu_18_04 to all_report continue-on-error: true - shell: bash {0} run: | mkdir -p all_reports - cp -r docker_ubuntu_18_04 ./all_reports/ - - name: download results docker_on_windows - uses: actions/download-artifact@v1 - with: - name: docker_on_windows - - name: cp to all_report - shell: bash - run: | - mkdir -p all_reports - cp -r docker_on_windows ./all_reports/ - - name: Download Results none_ubuntu16_04 - uses: actions/download-artifact@v1 - with: - name: none_ubuntu16_04 - - name: cp none_ubuntu16_04 to all_report - continue-on-error: true - shell: bash {0} - run: | - mkdir -p all_reports - cp -r none_ubuntu16_04 ./all_reports/ - - name: Download Results none_ubuntu18_04 - uses: actions/download-artifact@v1 - with: - name: none_ubuntu18_04 - - name: Copy none_ubuntu18_04 to all_report - continue-on-error: true - shell: bash {0} - run: | - mkdir -p all_reports - cp -r none_ubuntu18_04 ./all_reports/ - - name: Download Results podman_ubuntu_18_04 - uses: actions/download-artifact@v1 - with: - name: podman_ubuntu_18_04 - - name: 
Copy podman_ubuntu_18_04 to all_report - continue-on-error: true - shell: bash {0} - run: | - mkdir -p all_reports - cp -r podman_ubuntu_18_04 ./all_reports/ + ls -lah + cp -r ./functional_test_docker_ubuntu ./all_reports/ + cp -r ./addons_certs_tests_docker_ubuntu ./all_reports/ + cp -r ./multinode_pause_tests_docker_ubuntu ./all_reports/ + cp -r ./preload_docker_flags_tests_docker_ubuntu ./all_reports/ + cp -r ./functional_baremetal_ubuntu18_04 ./all_reports/ - uses: actions/upload-artifact@v1 with: name: all_reports diff --git a/CHANGELOG.md b/CHANGELOG.md index 11bcc13997..13094b8464 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,72 @@ # Release Notes +## Version 1.10.0-beta.0 - 2020-04-20 + +Improvements: +* faster containerd start by preloading images [#7793](https://github.com/kubernetes/minikube/pull/7793) +* Add fish completion support [#7777](https://github.com/kubernetes/minikube/pull/7777) +* Behavior change: start with no arguments uses existing cluster config [#7449](https://github.com/kubernetes/minikube/pull/7449) +* conformance: add --wait=all, reduce quirks [#7716](https://github.com/kubernetes/minikube/pull/7716) +* Upgrade minimum supported k8s version to v1.12 [#7723](https://github.com/kubernetes/minikube/pull/7723) +* Add default CNI network for running wth podman [#7754](https://github.com/kubernetes/minikube/pull/7754) +* Behavior change: fallback to alternate drivers on failure [#7389](https://github.com/kubernetes/minikube/pull/7389) +* Add registry addon feature for docker on mac/windows [#7603](https://github.com/kubernetes/minikube/pull/7603) +* Check node pressure & new option "node_ready" for --wait flag [#7752](https://github.com/kubernetes/minikube/pull/7752) +* docker driver: Add Service & Tunnel features to windows [#7739](https://github.com/kubernetes/minikube/pull/7739) +* Add master node/worker node type to `minikube status` [#7586](https://github.com/kubernetes/minikube/pull/7586) +* Add new wait component 
apps_running [#7460](https://github.com/kubernetes/minikube/pull/7460) +* none: Add support for OpenRC init (Google CloudShell) [#7539](https://github.com/kubernetes/minikube/pull/7539) +* Upgrade falco-probe module to version 0.21.0 [#7436](https://github.com/kubernetes/minikube/pull/7436) + +Bug Fixes: +* Fix multinode cluster creation for VM drivers [#7700](https://github.com/kubernetes/minikube/pull/7700) +* tunnel: Fix resolver file permissions, add DNS forwarding test [#7753](https://github.com/kubernetes/minikube/pull/7753) +* unconfine apparmor for kic [#7658](https://github.com/kubernetes/minikube/pull/7658) +* Fix `minikube delete` output nodename missing with docker/podman driver [#7553](https://github.com/kubernetes/minikube/pull/7553) +* Respect driver.FlagDefaults even if --extra-config is set [#7509](https://github.com/kubernetes/minikube/pull/7509) +* remove docker/podman overlay network for docker-runtime [#7425](https://github.com/kubernetes/minikube/pull/7425) + + +Huge thank you for this release towards our contributors: + +- Alonyb +- Anders F Björklund +- Anshul Sirur +- Balint Pato +- Batuhan Apaydın +- Brad Walker +- Frank Schwichtenberg +- Kenta Iso +- Medya Ghazizadeh +- Michael Vorburger ⛑️ +- Pablo Caderno +- Prasad Katti +- Priya Wadhwa +- Radoslaw Smigielski +- Ruben Baez +- Sharif Elgamal +- Thomas Strömberg +- Vikky Omkar +- ZouYu +- gorbondiga +- loftkun +- nestoralonso +- remraz +- sayboras +- tomocy + +Thank you so much to users who helped with community triage: + +- ps-feng +- Prasad Katti + +And big thank you to those who participated in our docs fixit week: + +- matjung +- jlaswell +- remraz + + ## Version 1.9.2 - 2020-04-04 Minor improvements: diff --git a/Makefile b/Makefile index e0fe592f0a..061d62039d 100755 --- a/Makefile +++ b/Makefile @@ -14,8 +14,8 @@ # Bump these on release - and please check ISO_VERSION for correctness. 
VERSION_MAJOR ?= 1 -VERSION_MINOR ?= 9 -VERSION_BUILD ?= 2 +VERSION_MINOR ?= 10 +VERSION_BUILD ?= 0-beta.0 RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD) VERSION ?= v$(RAW_VERSION) @@ -23,7 +23,7 @@ KUBERNETES_VERSION ?= $(shell egrep "DefaultKubernetesVersion =" pkg/minikube/co KIC_VERSION ?= $(shell egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f2) # Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions -ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).0 +ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD) # Dashes are valid in semver, but not Linux packaging. Use ~ to delimit alpha/beta DEB_VERSION ?= $(subst -,~,$(RAW_VERSION)) RPM_VERSION ?= $(DEB_VERSION) @@ -274,6 +274,10 @@ test: pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go ## Tr generate-docs: out/minikube ## Automatically generate commands documentation. out/minikube generate-docs --path ./site/content/en/docs/commands/ +.PHONY: gotest +gotest: $(SOURCE_GENERATED) ## Trigger minikube test + go test -tags "$(MINIKUBE_BUILD_TAGS)" -ldflags="$(MINIKUBE_LDFLAGS)" $(MINIKUBE_TEST_FILES) + .PHONY: extract extract: ## Compile extract tool go run cmd/extract/extract.go @@ -393,6 +397,10 @@ reportcard: ## Run goreportcard for minikube mdlint: @$(MARKDOWNLINT) $(MINIKUBE_MARKDOWN_FILES) +.PHONY: verify-iso +verify-iso: # Make sure the current ISO exists in the expected bucket + gsutil stat gs://$(ISO_BUCKET)/minikube-$(ISO_VERSION).iso + out/docs/minikube.md: $(shell find "cmd") $(shell find "pkg/minikube/constants") pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go go run -ldflags="$(MINIKUBE_LDFLAGS)" -tags gendocs hack/help_text/gen_help_text.go diff --git a/README.md b/README.md index 02308a05e2..79b007159e 100644 --- a/README.md +++ b/README.md @@ -61,8 +61,10 @@ minikube is a Kubernetes [#sig-cluster-lifecycle](https://github.com/kubernetes/ * [**#minikube on Kubernetes 
Slack**](https://kubernetes.slack.com) - Live chat with minikube developers! * [minikube-users mailing list](https://groups.google.com/forum/#!forum/minikube-users) * [minikube-dev mailing list](https://groups.google.com/forum/#!forum/minikube-dev) -* [Bi-weekly office hours, Mondays @ 11am PST](https://tinyurl.com/minikube-oh) * [Contributing](https://minikube.sigs.k8s.io/docs/contrib/) * [Development Roadmap](https://minikube.sigs.k8s.io/docs/contrib/roadmap/) +Join our meetings: +* [Bi-weekly office hours, Mondays @ 11am PST](https://tinyurl.com/minikube-oh) +* [Triage Party](https://minikube.sigs.k8s.io/docs/contrib/triage/) \ No newline at end of file diff --git a/cmd/minikube/cmd/completion.go b/cmd/minikube/cmd/completion.go index 5c0669d3cd..5320ed165b 100644 --- a/cmd/minikube/cmd/completion.go +++ b/cmd/minikube/cmd/completion.go @@ -28,7 +28,7 @@ import ( ) const longDescription = ` - Outputs minikube shell completion for the given shell (bash or zsh) + Outputs minikube shell completion for the given shell (bash, zsh or fish) This depends on the bash-completion binary. 
Example installation instructions: OS X: @@ -37,15 +37,18 @@ const longDescription = ` $ minikube completion bash > ~/.minikube-completion # for bash users $ minikube completion zsh > ~/.minikube-completion # for zsh users $ source ~/.minikube-completion + $ minikube completion fish > ~/.config/fish/completions/minikube.fish # for fish users Ubuntu: $ apt-get install bash-completion $ source /etc/bash-completion $ source <(minikube completion bash) # for bash users $ source <(minikube completion zsh) # for zsh users + $ minikube completion fish > ~/.config/fish/completions/minikube.fish # for fish users Additionally, you may want to output the completion to a file and source in your .bashrc Note for zsh users: [1] zsh completions are only supported in versions of zsh >= 5.2 + Note for fish users: [2] please refer to this docs for more details https://fishshell.com/docs/current/#tab-completion ` const boilerPlate = ` @@ -66,24 +69,29 @@ const boilerPlate = ` var completionCmd = &cobra.Command{ Use: "completion SHELL", - Short: "Outputs minikube shell completion for the given shell (bash or zsh)", + Short: "Outputs minikube shell completion for the given shell (bash, zsh or fish)", Long: longDescription, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { exit.UsageT("Usage: minikube completion SHELL") } - if args[0] != "bash" && args[0] != "zsh" { + if args[0] != "bash" && args[0] != "zsh" && args[0] != "fish" { exit.UsageT("Sorry, completion support is not yet implemented for {{.name}}", out.V{"name": args[0]}) } else if args[0] == "bash" { err := GenerateBashCompletion(os.Stdout, cmd.Parent()) if err != nil { exit.WithError("bash completion failed", err) } - } else { + } else if args[0] == "zsh" { err := GenerateZshCompletion(os.Stdout, cmd.Parent()) if err != nil { exit.WithError("zsh completion failed", err) } + } else { + err := GenerateFishCompletion(os.Stdout, cmd.Parent()) + if err != nil { + exit.WithError("fish completion failed", err) + } } 
}, @@ -279,3 +287,18 @@ __minikube_bash_source <(__minikube_convert_bash_to_zsh) return nil } + +// GenerateBashCompletion generates the completion for the bash shell +func GenerateFishCompletion(w io.Writer, cmd *cobra.Command) error { + _, err := w.Write([]byte(boilerPlate)) + if err != nil { + return err + } + + err = cmd.GenFishCompletion(w, true) + if err != nil { + return errors.Wrap(err, "Error generating fish completion") + } + + return nil +} diff --git a/cmd/minikube/cmd/config/configure.go b/cmd/minikube/cmd/config/configure.go index 412fa1204d..1fcf8e3cb0 100644 --- a/cmd/minikube/cmd/config/configure.go +++ b/cmd/minikube/cmd/config/configure.go @@ -100,8 +100,11 @@ var addonsConfigureCmd = &cobra.Command{ acrPassword = AskForPasswordValue("-- Enter service principal password to access Azure Container Registry: ") } + cname := ClusterFlagValue() + // Create ECR Secret err := service.CreateSecret( + cname, "kube-system", "registry-creds-ecr", map[string]string{ @@ -124,6 +127,7 @@ var addonsConfigureCmd = &cobra.Command{ // Create GCR Secret err = service.CreateSecret( + cname, "kube-system", "registry-creds-gcr", map[string]string{ @@ -142,6 +146,7 @@ var addonsConfigureCmd = &cobra.Command{ // Create Docker Secret err = service.CreateSecret( + cname, "kube-system", "registry-creds-dpr", map[string]string{ @@ -161,6 +166,7 @@ var addonsConfigureCmd = &cobra.Command{ // Create Azure Container Registry Secret err = service.CreateSecret( + cname, "kube-system", "registry-creds-acr", map[string]string{ diff --git a/cmd/minikube/cmd/config/open.go b/cmd/minikube/cmd/config/open.go index b49dce72fe..b5ad2db22e 100644 --- a/cmd/minikube/cmd/config/open.go +++ b/cmd/minikube/cmd/config/open.go @@ -77,7 +77,7 @@ minikube addons enable {{.name}}`, out.V{"name": addonName}) namespace := "kube-system" key := "kubernetes.io/minikube-addons-endpoint" - serviceList, err := service.GetServiceListByLabel(namespace, key, addonName) + serviceList, err := 
service.GetServiceListByLabel(cname, namespace, key, addonName) if err != nil { exit.WithCodeT(exit.Unavailable, "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}", out.V{"namespace": namespace, "labelName": key, "addonName": addonName, "error": err}) } @@ -89,7 +89,7 @@ You can add one by annotating a service with the label {{.labelName}}:{{.addonNa svc := serviceList.Items[i].ObjectMeta.Name var urlString []string - if urlString, err = service.WaitForService(co.API, namespace, svc, addonsURLTemplate, addonsURLMode, https, wait, interval); err != nil { + if urlString, err = service.WaitForService(co.API, co.Config.Name, namespace, svc, addonsURLTemplate, addonsURLMode, https, wait, interval); err != nil { exit.WithCodeT(exit.Unavailable, "Wait failed: {{.error}}", out.V{"error": err}) } diff --git a/cmd/minikube/cmd/config/profile.go b/cmd/minikube/cmd/config/profile.go index 46afa5237a..182b480600 100644 --- a/cmd/minikube/cmd/config/profile.go +++ b/cmd/minikube/cmd/config/profile.go @@ -44,6 +44,11 @@ var ProfileCmd = &cobra.Command{ } profile := args[0] + // Check whether the profile name is container friendly + if !config.ProfileNameValid(profile) { + out.WarningT("Profile name '{{.profilename}}' is not valid", out.V{"profilename": profile}) + exit.UsageT("Only alphanumeric, dots, underscores and dashes '-' are permitted. 
Minimum 2 characters, starting by alphanumeric.") + } /** we need to add code over here to check whether the profile name is in the list of reserved keywords diff --git a/cmd/minikube/cmd/dashboard.go b/cmd/minikube/cmd/dashboard.go index 51957e4b58..c83dd210fe 100644 --- a/cmd/minikube/cmd/dashboard.go +++ b/cmd/minikube/cmd/dashboard.go @@ -83,8 +83,9 @@ var dashboardCmd = &cobra.Command{ ns := "kubernetes-dashboard" svc := "kubernetes-dashboard" out.ErrT(out.Verifying, "Verifying dashboard health ...") - checkSVC := func() error { return service.CheckService(ns, svc) } - if err = retry.Expo(checkSVC, 1*time.Second, time.Minute*5); err != nil { + checkSVC := func() error { return service.CheckService(cname, ns, svc) } + // for slow machines or parallels in CI to avoid #7503 + if err = retry.Expo(checkSVC, 100*time.Microsecond, time.Minute*10); err != nil { exit.WithCodeT(exit.Unavailable, "dashboard service is not running: {{.error}}", out.V{"error": err}) } @@ -97,7 +98,7 @@ var dashboardCmd = &cobra.Command{ out.ErrT(out.Verifying, "Verifying proxy health ...") chkURL := func() error { return checkURL(url) } - if err = retry.Expo(chkURL, 1*time.Second, 3*time.Minute); err != nil { + if err = retry.Expo(chkURL, 100*time.Microsecond, 10*time.Minute); err != nil { exit.WithCodeT(exit.Unavailable, "{{.url}} is not accessible: {{.error}}", out.V{"url": url, "error": err}) } diff --git a/cmd/minikube/cmd/delete.go b/cmd/minikube/cmd/delete.go index b25696ae34..c2b05800aa 100644 --- a/cmd/minikube/cmd/delete.go +++ b/cmd/minikube/cmd/delete.go @@ -147,6 +147,8 @@ func runDelete(cmd *cobra.Command, args []string) { out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": cname}) } + deletePossibleKicLeftOver(cname) + errs := DeleteProfiles([]*config.Profile{profile}) if len(errs) > 0 { HandleDeletionErrors(errs) @@ -189,20 +191,30 @@ func DeleteProfiles(profiles []*config.Profile) []error { return errs } -func 
deleteProfileContainersAndVolumes(name string) { +func deletePossibleKicLeftOver(name string) { delLabel := fmt.Sprintf("%s=%s", oci.ProfileLabelKey, name) - errs := oci.DeleteContainersByLabel(oci.Docker, delLabel) - if errs != nil { // it will error if there is no container to delete - glog.Infof("error deleting containers for %s (might be okay):\n%v", name, errs) - } - errs = oci.DeleteAllVolumesByLabel(oci.Docker, delLabel) - if errs != nil { // it will not error if there is nothing to delete - glog.Warningf("error deleting volumes (might be okay).\nTo see the list of volumes run: 'docker volume ls'\n:%v", errs) - } + for _, bin := range []string{oci.Docker, oci.Podman} { + cs, err := oci.ListContainersByLabel(bin, delLabel) + if err == nil && len(cs) > 0 { + for _, c := range cs { + out.T(out.DeletingHost, `Deleting container "{{.name}}" ...`, out.V{"name": name}) + err := oci.DeleteContainer(bin, c) + if err != nil { // it will error if there is no container to delete + glog.Errorf("error deleting container %q. 
you might want to delete that manually :\n%v", name, err) + } - errs = oci.PruneAllVolumesByLabel(oci.Docker, delLabel) - if len(errs) > 0 { // it will not error if there is nothing to delete - glog.Warningf("error pruning volume (might be okay):\n%v", errs) + } + } + + errs := oci.DeleteAllVolumesByLabel(bin, delLabel) + if errs != nil { // it will not error if there is nothing to delete + glog.Warningf("error deleting volumes (might be okay).\nTo see the list of volumes run: 'docker volume ls'\n:%v", errs) + } + + errs = oci.PruneAllVolumesByLabel(bin, delLabel) + if len(errs) > 0 { // it will not error if there is nothing to delete + glog.Warningf("error pruning volume (might be okay):\n%v", errs) + } } } @@ -212,7 +224,7 @@ func deleteProfile(profile *config.Profile) error { // if driver is oci driver, delete containers and volumes if driver.IsKIC(profile.Config.Driver) { out.T(out.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": profile.Name, "driver_name": profile.Config.Driver}) - deleteProfileContainersAndVolumes(profile.Name) + deletePossibleKicLeftOver(profile.Name) } } diff --git a/cmd/minikube/cmd/delete_test.go b/cmd/minikube/cmd/delete_test.go index de16ed9107..90e0fadc8f 100644 --- a/cmd/minikube/cmd/delete_test.go +++ b/cmd/minikube/cmd/delete_test.go @@ -65,6 +65,14 @@ func TestDeleteProfile(t *testing.T) { if err != nil { t.Fatalf("tempdir: %v", err) } + + defer func() { //clean up tempdir + err := os.RemoveAll(td) + if err != nil { + t.Errorf("failed to clean up temp folder %q", td) + } + }() + err = copy.Copy("../../../pkg/minikube/config/testdata/delete-single", td) if err != nil { t.Fatalf("copy: %v", err) @@ -151,6 +159,13 @@ func TestDeleteAllProfiles(t *testing.T) { if err != nil { t.Fatalf("tempdir: %v", err) } + defer func() { //clean up tempdir + err := os.RemoveAll(td) + if err != nil { + t.Errorf("failed to clean up temp folder %q", td) + } + }() + err = 
copy.Copy("../../../pkg/minikube/config/testdata/delete-all", td) if err != nil { t.Fatalf("copy: %v", err) diff --git a/cmd/minikube/cmd/node_start.go b/cmd/minikube/cmd/node_start.go index 81f9ac6b80..7399e87123 100644 --- a/cmd/minikube/cmd/node_start.go +++ b/cmd/minikube/cmd/node_start.go @@ -75,7 +75,6 @@ var nodeStartCmd = &cobra.Command{ } func init() { - nodeStartCmd.Flags().String("name", "", "The name of the node to start") nodeStartCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.") nodeCmd.AddCommand(nodeStartCmd) } diff --git a/cmd/minikube/cmd/node_stop.go b/cmd/minikube/cmd/node_stop.go index 5dbceba1bc..3f5c8d7433 100644 --- a/cmd/minikube/cmd/node_stop.go +++ b/cmd/minikube/cmd/node_stop.go @@ -53,6 +53,5 @@ var nodeStopCmd = &cobra.Command{ } func init() { - nodeStopCmd.Flags().String("name", "", "The name of the node to delete") nodeCmd.AddCommand(nodeStopCmd) } diff --git a/cmd/minikube/cmd/service.go b/cmd/minikube/cmd/service.go index 039afefc28..fccc771f3a 100644 --- a/cmd/minikube/cmd/service.go +++ b/cmd/minikube/cmd/service.go @@ -33,7 +33,9 @@ import ( "github.com/spf13/cobra" "k8s.io/minikube/pkg/drivers/kic/oci" + "k8s.io/minikube/pkg/kapi" "k8s.io/minikube/pkg/minikube/browser" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/mustload" @@ -78,12 +80,12 @@ var serviceCmd = &cobra.Command{ cname := ClusterFlagValue() co := mustload.Healthy(cname) - if runtime.GOOS == "darwin" && co.Config.Driver == oci.Docker { + if driver.NeedsPortForward(co.Config.Driver) { startKicServiceTunnel(svc, cname) return } - urls, err := service.WaitForService(co.API, namespace, svc, serviceURLTemplate, serviceURLMode, https, wait, interval) + urls, err := service.WaitForService(co.API, co.Config.Name, namespace, svc, serviceURLTemplate, serviceURLMode, https, wait, interval) if err != 
nil { var s *service.SVCNotFoundError if errors.As(err, &s) { @@ -112,7 +114,7 @@ func startKicServiceTunnel(svc, configName string) { ctrlC := make(chan os.Signal, 1) signal.Notify(ctrlC, os.Interrupt) - clientset, err := service.K8s.GetClientset(1 * time.Second) + clientset, err := kapi.Client(configName) if err != nil { exit.WithError("error creating clientset", err) } @@ -137,7 +139,7 @@ func startKicServiceTunnel(svc, configName string) { service.PrintServiceList(os.Stdout, data) openURLs(svc, urls) - out.WarningT("Because you are using docker driver on Mac, the terminal needs to be open to run it.") + out.WarningT("Because you are using a Docker driver on {{.operating_system}}, the terminal needs to be open to run it.", out.V{"operating_system": runtime.GOOS}) <-ctrlC diff --git a/cmd/minikube/cmd/service_list.go b/cmd/minikube/cmd/service_list.go index 7b17ff3c1e..c3837712be 100644 --- a/cmd/minikube/cmd/service_list.go +++ b/cmd/minikube/cmd/service_list.go @@ -40,7 +40,7 @@ var serviceListCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { co := mustload.Healthy(ClusterFlagValue()) - serviceURLs, err := service.GetServiceURLs(co.API, serviceListNamespace, serviceURLTemplate) + serviceURLs, err := service.GetServiceURLs(co.API, co.Config.Name, serviceListNamespace, serviceURLTemplate) if err != nil { out.FatalT("Failed to get service URL: {{.error}}", out.V{"error": err}) out.ErrT(out.Notice, "Check that minikube is running and that you have specified the correct namespace (-n flag) if required.") diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 7855e91085..edd592b1e8 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -144,6 +144,10 @@ func runStart(cmd *cobra.Command, args []string) { registryMirror = viper.GetStringSlice("registry_mirror") } + if !config.ProfileNameValid(ClusterFlagValue()) { + out.WarningT("Profile name '{{.name}}' is not valid", out.V{"name": ClusterFlagValue()}) + 
exit.UsageT("Only alphanumeric, dots, underscores and dashes '-' are permitted. Minimum 2 characters, starting by alphanumeric.") + } existing, err := config.Load(ClusterFlagValue()) if err != nil && !config.IsNotExist(err) { exit.WithCodeT(exit.Data, "Unable to load config: {{.error}}", out.V{"error": err}) diff --git a/cmd/minikube/cmd/start_flags.go b/cmd/minikube/cmd/start_flags.go index 5b768a5267..c735e8025d 100644 --- a/cmd/minikube/cmd/start_flags.go +++ b/cmd/minikube/cmd/start_flags.go @@ -255,7 +255,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k if strings.ToLower(repository) == "auto" || mirrorCountry != "" { found, autoSelectedRepository, err := selectImageRepository(mirrorCountry, semver.MustParse(strings.TrimPrefix(k8sVersion, version.VersionPrefix))) if err != nil { - exit.WithError("Failed to check main repository and mirrors for images for images", err) + exit.WithError("Failed to check main repository and mirrors for images", err) } if !found { @@ -269,7 +269,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k repository = autoSelectedRepository } - if cmd.Flags().Changed(imageRepository) { + if cmd.Flags().Changed(imageRepository) || cmd.Flags().Changed(imageMirrorCountry) { out.T(out.SuccessType, "Using image repository {{.name}}", out.V{"name": repository}) } diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index a95c7978cb..c63ab1b7bc 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -73,6 +73,7 @@ const ( clusterNotRunningStatusFlag = 1 << 1 k8sNotRunningStatusFlag = 1 << 2 defaultStatusFormat = `{{.Name}} +type: Control Plane host: {{.Host}} kubelet: {{.Kubelet}} apiserver: {{.APIServer}} @@ -80,6 +81,7 @@ kubeconfig: {{.Kubeconfig}} ` workerStatusFormat = `{{.Name}} +type: Worker host: {{.Host}} kubelet: {{.Kubelet}} @@ -102,12 +104,11 @@ var statusCmd = &cobra.Command{ cname := ClusterFlagValue() api, cc := 
mustload.Partial(cname) - var st *Status - var err error + var statuses []*Status for _, n := range cc.Nodes { glog.Infof("checking status of %s ...", n.Name) machineName := driver.MachineName(*cc, n) - st, err = status(api, *cc, n) + st, err := status(api, *cc, n) glog.Infof("%s status: %+v", machineName, st) if err != nil { @@ -116,36 +117,40 @@ var statusCmd = &cobra.Command{ if st.Host == Nonexistent { glog.Errorf("The %q host does not exist!", machineName) } + statuses = append(statuses, st) + } - switch strings.ToLower(output) { - case "text": + switch strings.ToLower(output) { + case "text": + for _, st := range statuses { if err := statusText(st, os.Stdout); err != nil { exit.WithError("status text failure", err) } - case "json": - if err := statusJSON(st, os.Stdout); err != nil { - exit.WithError("status json failure", err) - } - default: - exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output)) } + case "json": + if err := statusJSON(statuses, os.Stdout); err != nil { + exit.WithError("status json failure", err) + } + default: + exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. 
Valid values: 'text', 'json'", output)) } - // TODO: Update for multi-node - os.Exit(exitCode(st)) + os.Exit(exitCode(statuses)) }, } -func exitCode(st *Status) int { +func exitCode(statuses []*Status) int { c := 0 - if st.Host != state.Running.String() { - c |= minikubeNotRunningStatusFlag - } - if (st.APIServer != state.Running.String() && st.APIServer != Irrelevant) || st.Kubelet != state.Running.String() { - c |= clusterNotRunningStatusFlag - } - if st.Kubeconfig != Configured && st.Kubeconfig != Irrelevant { - c |= k8sNotRunningStatusFlag + for _, st := range statuses { + if st.Host != state.Running.String() { + c |= minikubeNotRunningStatusFlag + } + if (st.APIServer != state.Running.String() && st.APIServer != Irrelevant) || st.Kubelet != state.Running.String() { + c |= clusterNotRunningStatusFlag + } + if st.Kubeconfig != Configured && st.Kubeconfig != Irrelevant { + c |= k8sNotRunningStatusFlag + } } return c } @@ -268,8 +273,15 @@ func statusText(st *Status, w io.Writer) error { return nil } -func statusJSON(st *Status, w io.Writer) error { - js, err := json.Marshal(st) +func statusJSON(st []*Status, w io.Writer) error { + var js []byte + var err error + // Keep backwards compat with single node clusters to not break anyone + if len(st) == 1 { + js, err = json.Marshal(st[0]) + } else { + js, err = json.Marshal(st) + } if err != nil { return err } diff --git a/cmd/minikube/cmd/status_test.go b/cmd/minikube/cmd/status_test.go index b11e549a6d..8bae781037 100644 --- a/cmd/minikube/cmd/status_test.go +++ b/cmd/minikube/cmd/status_test.go @@ -35,7 +35,7 @@ func TestExitCode(t *testing.T) { } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - got := exitCode(tc.state) + got := exitCode([]*Status{tc.state}) if got != tc.want { t.Errorf("exitcode(%+v) = %d, want: %d", tc.state, got, tc.want) } @@ -52,17 +52,17 @@ func TestStatusText(t *testing.T) { { name: "ok", state: &Status{Name: "minikube", Host: "Running", Kubelet: "Running", APIServer: 
"Running", Kubeconfig: Configured}, - want: "minikube\nhost: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n\n", + want: "minikube\ntype: Control Plane\nhost: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n\n", }, { name: "paused", state: &Status{Name: "minikube", Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured}, - want: "minikube\nhost: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n\n", + want: "minikube\ntype: Control Plane\nhost: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n\n", }, { name: "down", state: &Status{Name: "minikube", Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured}, - want: "minikube\nhost: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n", + want: "minikube\ntype: Control Plane\nhost: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n", }, } for _, tc := range tests { @@ -93,7 +93,7 @@ func TestStatusJSON(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { var b bytes.Buffer - err := statusJSON(tc.state, &b) + err := statusJSON([]*Status{tc.state}, &b) if err != nil { t.Errorf("json(%+v) error: %v", tc.state, err) } diff --git a/cmd/minikube/cmd/tunnel.go b/cmd/minikube/cmd/tunnel.go index dbf66cf110..6474e8fbf7 100644 --- a/cmd/minikube/cmd/tunnel.go +++ b/cmd/minikube/cmd/tunnel.go @@ -21,19 +21,18 @@ import ( "os" "os/signal" "path/filepath" - "runtime" "strconv" - "time" "github.com/golang/glog" "github.com/spf13/cobra" "k8s.io/minikube/pkg/drivers/kic/oci" + "k8s.io/minikube/pkg/kapi" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/driver" 
"k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/mustload" - "k8s.io/minikube/pkg/minikube/service" "k8s.io/minikube/pkg/minikube/tunnel" "k8s.io/minikube/pkg/minikube/tunnel/kic" ) @@ -65,7 +64,7 @@ var tunnelCmd = &cobra.Command{ // We define the tunnel and minikube error free if the API server responds within a second. // This also contributes to better UX, the tunnel status check can happen every second and // doesn't hang on the API server call during startup and shutdown time or if there is a temporary error. - clientset, err := service.K8s.GetClientset(1 * time.Second) + clientset, err := kapi.Client(cname) if err != nil { exit.WithError("error creating clientset", err) } @@ -78,7 +77,8 @@ var tunnelCmd = &cobra.Command{ cancel() }() - if runtime.GOOS == "darwin" && co.Config.Driver == oci.Docker { + if driver.NeedsPortForward(co.Config.Driver) { + port, err := oci.ForwardedPort(oci.Docker, cname, 22) if err != nil { exit.WithError("error getting ssh port", err) diff --git a/cmd/performance/mkcmp/cmd/mkcmp.go b/cmd/performance/mkcmp/cmd/mkcmp.go index 57c5715f4a..2a40dd9d5f 100644 --- a/cmd/performance/mkcmp/cmd/mkcmp.go +++ b/cmd/performance/mkcmp/cmd/mkcmp.go @@ -35,7 +35,11 @@ var rootCmd = &cobra.Command{ return validateArgs(args) }, RunE: func(cmd *cobra.Command, args []string) error { - return perf.CompareMinikubeStart(context.Background(), os.Stdout, args) + binaries, err := retrieveBinaries(args) + if err != nil { + return err + } + return perf.CompareMinikubeStart(context.Background(), os.Stdout, binaries) }, } @@ -46,6 +50,18 @@ func validateArgs(args []string) error { return nil } +func retrieveBinaries(args []string) ([]*perf.Binary, error) { + binaries := []*perf.Binary{} + for _, a := range args { + binary, err := perf.NewBinary(a) + if err != nil { + return nil, err + } + binaries = append(binaries, binary) + } + return binaries, nil +} + // Execute runs the mkcmp command func Execute() 
{ if err := rootCmd.Execute(); err != nil { diff --git a/default.profraw b/default.profraw new file mode 100644 index 0000000000..e69de29bb2 diff --git a/deploy/iso/minikube-iso/README.md b/deploy/iso/minikube-iso/README.md index cbcf617e26..6175120032 100644 --- a/deploy/iso/minikube-iso/README.md +++ b/deploy/iso/minikube-iso/README.md @@ -1 +1 @@ -The documentation for building and hacking on the minikube ISO can be found at [/docs/contributors/minikube_iso.md](/docs/contributors/minikube_iso.md). +The documentation for building and hacking on the minikube ISO can be found [here](https://minikube.sigs.k8s.io/docs/contrib/building/iso/). diff --git a/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig b/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig index 9611b6a9ac..21f310fb0c 100644 --- a/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig +++ b/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig @@ -114,6 +114,7 @@ CONFIG_TCP_CONG_ADVANCED=y CONFIG_TCP_MD5SIG=y CONFIG_INET6_AH=y CONFIG_INET6_ESP=y +CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_NETLABEL=y CONFIG_NETFILTER=y CONFIG_NETFILTER_NETLINK_ACCT=y @@ -351,6 +352,7 @@ CONFIG_NETCONSOLE=y CONFIG_TUN=y CONFIG_VETH=y CONFIG_VIRTIO_NET=y +CONFIG_NET_VRF=m CONFIG_AMD8111_ETH=m CONFIG_PCNET32=m CONFIG_PCMCIA_NMCLAN=m diff --git a/deploy/iso/minikube-iso/package/automount/minikube-automount b/deploy/iso/minikube-iso/package/automount/minikube-automount index 17fc8b0279..77e5fda1fe 100755 --- a/deploy/iso/minikube-iso/package/automount/minikube-automount +++ b/deploy/iso/minikube-iso/package/automount/minikube-automount @@ -109,6 +109,10 @@ if [ -n "$BOOT2DOCKER_DATA" ]; then mkdir /var/log mount --bind /mnt/$PARTNAME/var/log /var/log + mkdir -p /mnt/$PARTNAME/var/tmp + mkdir /var/tmp + mount --bind /mnt/$PARTNAME/var/tmp /var/tmp + mkdir -p /mnt/$PARTNAME/var/lib/kubelet mkdir /var/lib/kubelet mount --bind /mnt/$PARTNAME/var/lib/kubelet /var/lib/kubelet diff --git 
a/deploy/iso/minikube-iso/package/podman/podman.mk b/deploy/iso/minikube-iso/package/podman/podman.mk index 8781c0b0b9..fb12281c9d 100644 --- a/deploy/iso/minikube-iso/package/podman/podman.mk +++ b/deploy/iso/minikube-iso/package/podman/podman.mk @@ -29,6 +29,8 @@ endef define PODMAN_INSTALL_TARGET_CMDS $(INSTALL) -Dm755 $(@D)/bin/podman $(TARGET_DIR)/usr/bin/podman + $(INSTALL) -d -m 755 $(TARGET_DIR)/etc/cni/net.d/ + $(INSTALL) -m 644 cni/87-podman-bridge.conflist $(TARGET_DIR)/etc/cni/net.d/87-podman-bridge.conflist endef $(eval $(generic-package)) diff --git a/go.mod b/go.mod index 62c03dbf00..e8000701b0 100644 --- a/go.mod +++ b/go.mod @@ -70,7 +70,7 @@ require ( github.com/samalba/dockerclient v0.0.0-20160414174713-91d7393ff859 // indirect github.com/shirou/gopsutil v2.18.12+incompatible github.com/spf13/cast v1.3.1 // indirect - github.com/spf13/cobra v0.0.5 + github.com/spf13/cobra v1.0.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.6.1 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect diff --git a/go.sum b/go.sum index d9235acee9..e515f17ba5 100644 --- a/go.sum +++ b/go.sum @@ -156,6 +156,8 @@ github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfc github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= @@ 
-706,6 +708,8 @@ github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNue github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.5.3-0.20200218234912-41c5fccfd6f6 h1:tlXG832s5pa9x9Gs3Rp2rTvEqjiDEuETUOSfBEiTcns= github.com/russross/blackfriday v1.5.3-0.20200218234912-41c5fccfd6f6/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sayboras/dockerclient v0.0.0-20191231050035-015626177a97 h1:DWY4yZN6w+FSKMeqBBXaalT8zmCn4DVwBGopShnlwFE= @@ -722,6 +726,8 @@ github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 h1:udFKJ0aHUL60LboW/A+D github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -750,6 +756,8 @@ github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3 github.com/spf13/cobra v0.0.3/go.mod 
h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -764,6 +772,7 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.6.1 h1:VPZzIkznI1YhVMRi6vNFLHSwhnhReBfgTxIPccpfdZk= github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= diff --git a/hack/conformance_tests.sh b/hack/conformance_tests.sh index 3de828e59e..b4509e8e9c 100755 --- a/hack/conformance_tests.sh +++ b/hack/conformance_tests.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # Copyright 2019 The Kubernetes Authors All rights reserved. # @@ -27,15 +27,16 @@ set -ex -o pipefail readonly PROFILE_NAME="k8sconformance" readonly MINIKUBE=${1:-./out/minikube} shift || true -readonly START_ARGS=$* # Requires a fully running Kubernetes cluster. 
"${MINIKUBE}" delete -p "${PROFILE_NAME}" || true -"${MINIKUBE}" start -p "${PROFILE_NAME}" $START_ARGS +"${MINIKUBE}" start -p "${PROFILE_NAME}" --wait=all +kubectl --context "${PROFILE_NAME}" get pods --all-namespaces "${MINIKUBE}" status -p "${PROFILE_NAME}" -kubectl get pods --all-namespaces -go get -u -v github.com/heptio/sonobuoy +go get -u -v github.com/vmware-tanzu/sonobuoy + + sonobuoy run --wait outdir="$(mktemp -d)" sonobuoy retrieve "${outdir}" @@ -47,8 +48,8 @@ mkdir ./results; tar xzf *.tar.gz -C ./results version=$(${MINIKUBE} version | cut -d" " -f3) -mkdir minikube-${version} -cd minikube-${version} +mkdir "minikube-${version}" +cd "minikube-${version}" cat <PRODUCT.yaml vendor: minikube @@ -68,4 +69,4 @@ EOF cp ../results/plugins/e2e/results/* . cd .. -cp -r minikube-${version} ${cwd} +cp -r "minikube-${version}" "${cwd}" diff --git a/hack/jenkins/common.sh b/hack/jenkins/common.sh index 3035caae53..309a89d8a5 100755 --- a/hack/jenkins/common.sh +++ b/hack/jenkins/common.sh @@ -338,9 +338,9 @@ fi echo ">> Installing gopogh" if [ "$(uname)" != "Darwin" ]; then - curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64 && sudo install gopogh-linux-amd64 /usr/local/bin/gopogh + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64 && sudo install gopogh-linux-amd64 /usr/local/bin/gopogh else - curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-darwin-amd64 && sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-darwin-amd64 && sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh fi echo ">> Running gopogh" diff --git a/hack/jenkins/linux_integration_tests_podman.sh b/hack/jenkins/linux_integration_tests_podman.sh index 1dfbdb4456..f6c704643f 100755 --- a/hack/jenkins/linux_integration_tests_podman.sh +++ b/hack/jenkins/linux_integration_tests_podman.sh @@ -27,7 +27,7 @@ set -e 
OS_ARCH="linux-amd64" VM_DRIVER="podman" -JOB_NAME="Podman_Linux" +JOB_NAME="Experimental_Podman_Linux" mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES" sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP" diff --git a/hack/jenkins/minikube_set_pending.sh b/hack/jenkins/minikube_set_pending.sh index 1d91deaa31..8ce6a0135f 100755 --- a/hack/jenkins/minikube_set_pending.sh +++ b/hack/jenkins/minikube_set_pending.sh @@ -42,6 +42,7 @@ jobs=( 'none_Linux' 'Docker_Linux' 'Docker_macOS' + 'Docker_Windows' 'Podman_Linux' ) diff --git a/hack/jenkins/osx_integration_tests_docker.sh b/hack/jenkins/osx_integration_tests_docker.sh index c2865d83ff..acfd2e79cc 100755 --- a/hack/jenkins/osx_integration_tests_docker.sh +++ b/hack/jenkins/osx_integration_tests_docker.sh @@ -33,8 +33,13 @@ EXTRA_START_ARGS="" EXPECTED_DEFAULT_DRIVER="hyperkit" -# restart docker on mac for a fresh test -osascript -e 'quit app "Docker"'; open -a Docker ; while [ -z "$(docker info 2> /dev/null )" ]; do printf "."; sleep 1; done; echo "" || true +# fix mac os as a service on mac os +# https://github.com/docker/for-mac/issues/882#issuecomment-506372814 +osascript -e 'quit app "Docker"'; +sudo /Applications/Docker.app/Contents/MacOS/Docker --quit-after-install --unattended || true +# repeating without sudo because https://github.com/docker/for-mac/issues/882#issuecomment-516946766 +/Applications/Docker.app/Contents/MacOS/Docker --quit-after-install --unattended || true +osascript -e 'quit app "Docker"'; /Applications/Docker.app/Contents/MacOS/Docker --unattended &; while [ -z "$(docker info 2> /dev/null )" ]; do printf "."; sleep 1; done; echo "" || true mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES" install cron/cleanup_and_reboot_Darwin.sh $HOME/cleanup_and_reboot.sh || echo "FAILED TO 
INSTALL CLEANUP" diff --git a/hack/jenkins/release_build_and_upload.sh b/hack/jenkins/release_build_and_upload.sh index 6e76c4c4a1..71cf239516 100755 --- a/hack/jenkins/release_build_and_upload.sh +++ b/hack/jenkins/release_build_and_upload.sh @@ -38,6 +38,10 @@ grep -E "^VERSION_BUILD \\?=" Makefile | grep "${VERSION_BUILD}" # Force go packages to the Jekins home directory export GOPATH=$HOME/go +# Verify ISO exists +echo "Verifying ISO exists ..." +make verify-iso + # Build and upload env BUILD_IN_DOCKER=y \ make -j 16 \ diff --git a/hack/jenkins/windows_integration_test_docker.ps1 b/hack/jenkins/windows_integration_test_docker.ps1 new file mode 100644 index 0000000000..b30125c27d --- /dev/null +++ b/hack/jenkins/windows_integration_test_docker.ps1 @@ -0,0 +1,34 @@ +# Copyright 2019 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +mkdir -p out +gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/minikube-windows-amd64.exe out/ +gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/e2e-windows-amd64.exe out/ +gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/testdata . 
+ +./out/minikube-windows-amd64.exe delete --all + +out/e2e-windows-amd64.exe -minikube-start-args="--driver=docker" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=65m +$env:result=$lastexitcode +# If the last exit code was 0->success, x>0->error +If($env:result -eq 0){$env:status="success"} +Else {$env:status="failure"} + +# $env:SHORT_COMMIT=$env:COMMIT.substring(0, 7) +# to be used later to implement https://github.com/kubernetes/minikube/issues/6593 +$env:target_url="https://storage.googleapis.com/minikube-builds/logs/$env:MINIKUBE_LOCATION/Docker_Windows.txt" +$json = "{`"state`": `"$env:status`", `"description`": `"Jenkins`", `"target_url`": `"$env:target_url`", `"context`": `"Docker_Windows`"}" +Invoke-WebRequest -Uri "https://api.github.com/repos/kubernetes/minikube/statuses/$env:COMMIT`?access_token=$env:access_token" -Body $json -ContentType "application/json" -Method Post -usebasicparsing + +Exit $env:result \ No newline at end of file diff --git a/hack/jenkins/windows_integration_test_hyperv.ps1 b/hack/jenkins/windows_integration_test_hyperv.ps1 index 536c4e35cc..45cd4f92d0 100644 --- a/hack/jenkins/windows_integration_test_hyperv.ps1 +++ b/hack/jenkins/windows_integration_test_hyperv.ps1 @@ -17,9 +17,9 @@ gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/minikube-windows-am gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/e2e-windows-amd64.exe out/ gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/testdata . 
-./out/minikube-windows-amd64.exe delete +./out/minikube-windows-amd64.exe delete --all -out/e2e-windows-amd64.exe -minikube-start-args="--driver=hyperv --hyperv-virtual-switch=primary-virtual-switch" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=65m +out/e2e-windows-amd64.exe -minikube-start-args="--driver=hyperv" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=65m $env:result=$lastexitcode # If the last exit code was 0->success, x>0->error If($env:result -eq 0){$env:status="success"} diff --git a/hack/preload-images/generate.go b/hack/preload-images/generate.go index 1a22e7404d..a85ff36349 100644 --- a/hack/preload-images/generate.go +++ b/hack/preload-images/generate.go @@ -21,6 +21,7 @@ import ( "os" "os/exec" "path/filepath" + "time" "github.com/pkg/errors" "k8s.io/minikube/pkg/drivers/kic" @@ -29,18 +30,15 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/sysinit" + "k8s.io/minikube/pkg/util" + "k8s.io/minikube/pkg/util/retry" ) func generateTarball(kubernetesVersion, containerRuntime, tarballFilename string) error { - defer func() { - if err := deleteMinikube(); err != nil { - fmt.Println(err) - } - }() - driver := kic.NewDriver(kic.Config{ KubernetesVersion: kubernetesVersion, ContainerRuntime: driver.Docker, @@ -68,44 +66,94 @@ func generateTarball(kubernetesVersion, containerRuntime, tarballFilename string if err != nil { return errors.Wrap(err, "kubeadm images") } - if containerRuntime != "docker" { // kic overlay image is only needed by containerd and cri-o https://github.com/kubernetes/minikube/issues/7428 imgs = append(imgs, kic.OverlayImage) } + runner := command.NewKICRunner(profile, driver.OCIBinary) + + // will need to do this to enable the container run-time service + sv, err := 
util.ParseKubernetesVersion(kubernetesVersion) + if err != nil { + return errors.Wrap(err, "Failed to parse kubernetes version") + } + + co := cruntime.Config{ + Type: containerRuntime, + Runner: runner, + ImageRepository: "", + KubernetesVersion: sv, // this is just to satisfy cruntime and shouldnt matter what version. + } + cr, err := cruntime.New(co) + if err != nil { + return errors.Wrap(err, "failed create new runtime") + } + if err := cr.Enable(true); err != nil { + return errors.Wrap(err, "enable container runtime") + } + for _, img := range imgs { - cmd := exec.Command("docker", "exec", profile, "docker", "pull", img) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return errors.Wrapf(err, "downloading %s", img) + pull := func() error { + cmd := imagePullCommand(containerRuntime, img) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + time.Sleep(time.Second) // to avoid error: : exec: already started + return errors.Wrapf(err, "pulling image %s", img) + } + return nil } + // retry up to 5 times if network is bad + if err = retry.Expo(pull, time.Microsecond, time.Minute, 5); err != nil { + return errors.Wrapf(err, "pull image %s", img) + } + } // Transfer in k8s binaries kcfg := config.KubernetesConfig{ KubernetesVersion: kubernetesVersion, } - runner := command.NewKICRunner(profile, driver.OCIBinary) + sm := sysinit.New(runner) if err := bsutil.TransferBinaries(kcfg, runner, sm); err != nil { return errors.Wrap(err, "transferring k8s binaries") } // Create image tarball - if err := createImageTarball(tarballFilename); err != nil { + if err := createImageTarball(tarballFilename, containerRuntime); err != nil { return errors.Wrap(err, "create tarball") } + return copyTarballToHost(tarballFilename) } -func createImageTarball(tarballFilename string) error { +// returns the right command to pull image for a specific runtime +func imagePullCommand(containerRuntime, img string) *exec.Cmd 
{ + if containerRuntime == "docker" { + return exec.Command("docker", "exec", profile, "docker", "pull", img) + } + + if containerRuntime == "containerd" { + return exec.Command("docker", "exec", profile, "sudo", "crictl", "pull", img) + } + return nil +} + +func createImageTarball(tarballFilename, containerRuntime string) error { // directories to save into tarball dirs := []string{ - fmt.Sprintf("./lib/docker/%s", dockerStorageDriver), - "./lib/docker/image", "./lib/minikube/binaries", } + + if containerRuntime == "docker" { + dirs = append(dirs, fmt.Sprintf("./lib/docker/%s", dockerStorageDriver), "./lib/docker/image") + } + + if containerRuntime == "containerd" { + dirs = append(dirs, fmt.Sprintf("./lib/containerd")) + } + args := []string{"exec", profile, "sudo", "tar", "-I", "lz4", "-C", "/var", "-cvf", tarballFilename} args = append(args, dirs...) cmd := exec.Command("docker", args...) @@ -127,7 +175,7 @@ func copyTarballToHost(tarballFilename string) error { } func deleteMinikube() error { - cmd := exec.Command(minikubePath, "delete", "-p", profile) + cmd := exec.Command(minikubePath, "delete", "-p", profile) // to avoid https://github.com/kubernetes/minikube/issues/7814 cmd.Stdout = os.Stdout return cmd.Run() } diff --git a/hack/preload-images/preload_images.go b/hack/preload-images/preload_images.go index 37bbd678e7..6fc6367b4f 100644 --- a/hack/preload-images/preload_images.go +++ b/hack/preload-images/preload_images.go @@ -20,12 +20,13 @@ import ( "bytes" "flag" "fmt" + "os" "os/exec" + "runtime/debug" "strings" "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/download" - "k8s.io/minikube/pkg/minikube/exit" ) const ( @@ -35,7 +36,7 @@ const ( var ( dockerStorageDriver = "overlay2" - containerRuntimes = []string{"docker"} + containerRuntimes = []string{"docker", "containerd"} k8sVersion string k8sVersions []string ) @@ -50,14 +51,24 @@ func init() { } func main() { + defer func() { + if err := deleteMinikube(); err != nil { + fmt.Printf("error 
cleaning up minikube: %v \n", err) + } + }() + + if err := deleteMinikube(); err != nil { + fmt.Printf("error cleaning up minikube at start up: %v \n", err) + } + if err := verifyDockerStorage(); err != nil { - exit.WithError("Docker storage type is incompatible: %v\n", err) + exit("Docker storage type is incompatible: %v \n", err) } if k8sVersions == nil { var err error k8sVersions, err = RecentK8sVersions() if err != nil { - exit.WithError("Unable to get recent k8s versions: %v\n", err) + exit("Unable to get recent k8s versions: %v\n", err) } } @@ -65,16 +76,21 @@ func main() { for _, cr := range containerRuntimes { tf := download.TarballName(kv, cr) if download.PreloadExists(kv, cr) { - fmt.Printf("A preloaded tarball for k8s version %s already exists, skipping generation.\n", kv) + fmt.Printf("A preloaded tarball for k8s version %s - runtime %q already exists, skipping generation.\n", kv, cr) continue } - fmt.Printf("A preloaded tarball for k8s version %s doesn't exist, generating now...\n", kv) + fmt.Printf("A preloaded tarball for k8s version %s - runtime %q doesn't exist, generating now...\n", kv, cr) if err := generateTarball(kv, cr, tf); err != nil { - exit.WithError(fmt.Sprintf("generating tarball for k8s version %s with %s", kv, cr), err) + exit(fmt.Sprintf("generating tarball for k8s version %s with %s", kv, cr), err) } if err := uploadTarball(tf); err != nil { - exit.WithError(fmt.Sprintf("uploading tarball for k8s version %s with %s", kv, cr), err) + exit(fmt.Sprintf("uploading tarball for k8s version %s with %s", kv, cr), err) } + + if err := deleteMinikube(); err != nil { + fmt.Printf("error cleaning up minikube before finishing up: %v\n", err) + } + } } } @@ -93,3 +109,12 @@ func verifyDockerStorage() error { } return nil } + +// exit will exit and clean up minikube +func exit(msg string, err error) { + fmt.Printf("WithError(%s)=%v called from:\n%s", msg, err, debug.Stack()) + if err := deleteMinikube(); err != nil { + fmt.Printf("error cleaning up 
minikube at start up: %v\n", err) + } + os.Exit(60) +} diff --git a/pkg/addons/addons.go b/pkg/addons/addons.go index b39ee3f601..b76ad996ba 100644 --- a/pkg/addons/addons.go +++ b/pkg/addons/addons.go @@ -28,9 +28,12 @@ import ( "github.com/golang/glog" "github.com/pkg/errors" + + "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" @@ -176,6 +179,17 @@ https://github.com/kubernetes/minikube/issues/7332`, out.V{"driver_name": cc.Dri return nil } + if name == "registry" { + if driver.NeedsPortForward(cc.Driver) { + port, err := oci.ForwardedPort(cc.Driver, cc.Name, constants.RegistryAddonPort) + if err != nil { + return errors.Wrap(err, "registry port") + } + out.T(out.Tip, `Registry addon on with {{.driver}} uses {{.port}} please use that instead of default 5000`, out.V{"driver": cc.Driver, "port": port}) + out.T(out.Documentation, `For more information see: https://minikube.sigs.k8s.io/docs/drivers/{{.driver}}`, out.V{"driver": cc.Driver}) + } + } + cmd, err := machine.CommandRunner(host) if err != nil { return errors.Wrap(err, "command runner") @@ -244,7 +258,7 @@ func enableOrDisableAddonInternal(cc *config.ClusterConfig, addon *assets.Addon, return err } - return retry.Expo(apply, 1*time.Second, time.Second*30) + return retry.Expo(apply, 100*time.Microsecond, time.Minute) } // enableOrDisableStorageClasses enables or disables storage classes @@ -259,10 +273,6 @@ func enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val st if name == "storage-provisioner-gluster" { class = "glusterfile" } - storagev1, err := storageclass.GetStoragev1() - if err != nil { - return errors.Wrapf(err, "Error getting storagev1 interface %v ", err) - } api, err := machine.NewAPIClient() if err != nil { @@ -279,6 
+289,11 @@ func enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val st return enableOrDisableAddon(cc, name, val) } + storagev1, err := storageclass.GetStoragev1(cc.Name) + if err != nil { + return errors.Wrapf(err, "Error getting storagev1 interface %v ", err) + } + if enable { // Only StorageClass for 'name' should be marked as default err = storageclass.SetDefaultStorageClass(storagev1, class) @@ -332,7 +347,9 @@ func Start(wg *sync.WaitGroup, cc *config.ClusterConfig, toEnable map[string]boo var awg sync.WaitGroup - out.T(out.AddonEnable, "Enabling addons: {{.addons}}", out.V{"addons": strings.Join(toEnableList, ", ")}) + defer func() { // making it show after verifications( not perfect till #7613 is closed) + out.T(out.AddonEnable, "Enabled addons: {{.addons}}", out.V{"addons": strings.Join(toEnableList, ", ")}) + }() for _, a := range toEnableList { awg.Add(1) go func(name string) { diff --git a/pkg/drivers/hyperkit/iso_test.go b/pkg/drivers/hyperkit/iso_test.go index 8dda1e4a6e..d3999add80 100644 --- a/pkg/drivers/hyperkit/iso_test.go +++ b/pkg/drivers/hyperkit/iso_test.go @@ -27,7 +27,12 @@ func TestExtractFile(t *testing.T) { if nil != err { return } - defer os.Remove(testDir) + defer func() { //clean up tempdir + err := os.RemoveAll(testDir) + if err != nil { + t.Errorf("failed to clean up temp folder %q", testDir) + } + }() tests := []struct { name string diff --git a/pkg/drivers/kic/kic.go b/pkg/drivers/kic/kic.go index 0378c78626..0c1a4e4f48 100644 --- a/pkg/drivers/kic/kic.go +++ b/pkg/drivers/kic/kic.go @@ -26,7 +26,6 @@ import ( "time" "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/log" "github.com/docker/machine/libmachine/ssh" "github.com/docker/machine/libmachine/state" "github.com/golang/glog" @@ -39,6 +38,7 @@ import ( "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/sysinit" + "k8s.io/minikube/pkg/util/retry" ) // Driver 
represents a kic driver https://minikube.sigs.k8s.io/docs/reference/drivers/docker @@ -93,6 +93,10 @@ func (d *Driver) Create() error { ListenAddress: oci.DefaultBindIPV4, ContainerPort: constants.DockerDaemonPort, }, + oci.PortMapping{ + ListenAddress: oci.DefaultBindIPV4, + ContainerPort: constants.RegistryAddonPort, + }, ) exists, err := oci.ContainerExists(d.OCIBinary, params.Name) @@ -126,7 +130,7 @@ func (d *Driver) Create() error { return } t := time.Now() - glog.Infof("Starting extracting preloaded images to volume") + glog.Infof("Starting extracting preloaded images to volume ...") // Extract preloaded images to container if err := oci.ExtractTarballToVolume(download.TarballPath(d.NodeConfig.KubernetesVersion, d.NodeConfig.ContainerRuntime), params.Name, BaseImage); err != nil { glog.Infof("Unable to extract preloaded tarball to volume: %v", err) @@ -259,9 +263,14 @@ func (d *Driver) Kill() error { if err := sysinit.New(d.exec).ForceStop("kubelet"); err != nil { glog.Warningf("couldn't force stop kubelet. will continue with kill anyways: %v", err) } - cmd := exec.Command(d.NodeConfig.OCIBinary, "kill", d.MachineName) - if err := cmd.Run(); err != nil { - return errors.Wrapf(err, "killing kic node %s", d.MachineName) + + if err := oci.ShutDown(d.OCIBinary, d.MachineName); err != nil { + glog.Warningf("couldn't shutdown the container, will continue with kill anyways: %v", err) + } + + cr := command.NewExecRunner() // using exec runner for interacting with dameon. 
+ if _, err := cr.RunCmd(exec.Command(d.NodeConfig.OCIBinary, "kill", d.MachineName)); err != nil { + return errors.Wrapf(err, "killing %q", d.MachineName) } return nil } @@ -269,16 +278,22 @@ func (d *Driver) Kill() error { // Remove will delete the Kic Node Container func (d *Driver) Remove() error { if _, err := oci.ContainerID(d.OCIBinary, d.MachineName); err != nil { - log.Warnf("could not find the container %s to remove it.", d.MachineName) + glog.Infof("could not find the container %s to remove it. will try anyways", d.MachineName) } - cmd := exec.Command(d.NodeConfig.OCIBinary, "rm", "-f", "-v", d.MachineName) - o, err := cmd.CombinedOutput() - out := strings.TrimSpace(string(o)) - if err != nil { - if strings.Contains(out, "is already in progress") { - log.Warnf("Docker engine is stuck. please restart docker daemon on your computer.", d.MachineName) + + if err := oci.DeleteContainer(d.NodeConfig.OCIBinary, d.MachineName); err != nil { + if strings.Contains(err.Error(), "is already in progress") { + return errors.Wrap(err, "stuck delete") } - return errors.Wrapf(err, "removing container %s, output %s", d.MachineName, out) + if strings.Contains(err.Error(), "No such container:") { + return nil // nothing was found to delete. + } + + } + + // check there be no container left after delete + if id, err := oci.ContainerID(d.OCIBinary, d.MachineName); err == nil && id != "" { + return fmt.Errorf("expected no container ID be found for %q after delete. 
but got %q", d.MachineName, id) } return nil } @@ -287,40 +302,43 @@ func (d *Driver) Remove() error { func (d *Driver) Restart() error { s, err := d.GetState() if err != nil { - return errors.Wrap(err, "get kic state") + glog.Warningf("get state during restart: %v", err) } - switch s { - case state.Stopped: + if s == state.Stopped { // don't stop if already stopped return d.Start() - case state.Running, state.Error: - if err = d.Stop(); err != nil { - return fmt.Errorf("restarting a kic stop phase %v", err) - } - if err = d.Start(); err != nil { - return fmt.Errorf("restarting a kic start phase %v", err) - } - return nil } + if err = d.Stop(); err != nil { + return fmt.Errorf("stop during restart %v", err) + } + if err = d.Start(); err != nil { + return fmt.Errorf("start during restart %v", err) + } + return nil - return fmt.Errorf("restarted not implemented for kic state %s yet", s) } -// Start a _stopped_ kic container -// not meant to be used for Create(). +// Start an already created kic container func (d *Driver) Start() error { - s, err := d.GetState() - if err != nil { - return errors.Wrap(err, "get kic state") + cr := command.NewExecRunner() // using exec runner for interacting with docker/podman daemon + if _, err := cr.RunCmd(exec.Command(d.NodeConfig.OCIBinary, "start", d.MachineName)); err != nil { + return errors.Wrap(err, "start") } - if s == state.Stopped { - cmd := exec.Command(d.NodeConfig.OCIBinary, "start", d.MachineName) - if err := cmd.Run(); err != nil { - return errors.Wrapf(err, "starting a stopped kic node %s", d.MachineName) + checkRunning := func() error { + s, err := oci.ContainerStatus(d.NodeConfig.OCIBinary, d.MachineName) + if err != nil { + return err } + if s != state.Running { + return fmt.Errorf("expected container state be running but got %q", s) + } + glog.Infof("container %q state is running.", d.MachineName) return nil } - // TODO:medyagh maybe make it idempotent - return fmt.Errorf("cant start a not-stopped (%s) kic node", 
s) + + if err := retry.Expo(checkRunning, 500*time.Microsecond, time.Second*30); err != nil { + return err + } + return nil } // Stop a host gracefully, including any containers that we are managing. diff --git a/pkg/drivers/kic/oci/oci.go b/pkg/drivers/kic/oci/oci.go index f1e53843eb..89bc22df71 100644 --- a/pkg/drivers/kic/oci/oci.go +++ b/pkg/drivers/kic/oci/oci.go @@ -25,6 +25,7 @@ import ( "bufio" "bytes" + "github.com/docker/machine/libmachine/state" "github.com/golang/glog" "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/constants" @@ -42,7 +43,7 @@ import ( func DeleteContainersByLabel(ociBin string, label string) []error { var deleteErrs []error - cs, err := listContainersByLabel(ociBin, label) + cs, err := ListContainersByLabel(ociBin, label) if err != nil { return []error{fmt.Errorf("listing containers by label %q", label)} } @@ -60,6 +61,9 @@ func DeleteContainersByLabel(ociBin string, label string) []error { glog.Errorf("%s daemon seems to be stuck. Please try restarting your %s. :%v", ociBin, ociBin, err) continue } + if err := ShutDown(ociBin, c); err != nil { + glog.Infof("couldn't shut down %s (might be okay): %v ", c, err) + } cmd := exec.Command(ociBin, "rm", "-f", "-v", c) if out, err := cmd.CombinedOutput(); err != nil { deleteErrs = append(deleteErrs, errors.Wrapf(err, "delete container %s: output %s", c, out)) @@ -77,6 +81,9 @@ func DeleteContainer(ociBin string, name string) error { glog.Errorf("%s daemon seems to be stuck. Please try restarting your %s. 
Will try to delete anyways: %v", ociBin, ociBin, err) } // try to delete anyways + if err := ShutDown(ociBin, name); err != nil { + glog.Infof("couldn't shut down %s (might be okay): %v ", name, err) + } cmd := exec.Command(ociBin, "rm", "-f", "-v", name) if out, err := cmd.CombinedOutput(); err != nil { return errors.Wrapf(err, "delete container %s: output %s", name, out) @@ -108,7 +115,9 @@ func CreateContainerNode(p CreateParams) error { // including some ones docker would otherwise do by default. // for now this is what we want. in the future we may revisit this. "--privileged", - "--security-opt", "seccomp=unconfined", // also ignore seccomp + "--security-opt", "seccomp=unconfined", // ignore seccomp + // ignore apparmore github actions docker: https://github.com/kubernetes/minikube/issues/7624 + "--security-opt", "apparmor=unconfined", "--tmpfs", "/tmp", // various things depend on working /tmp "--tmpfs", "/run", // systemd wants a writable /run // logs,pods be stroed on filesystem vs inside container, @@ -163,7 +172,7 @@ func CreateContainerNode(p CreateParams) error { if err != nil { return fmt.Errorf("temporary error checking status for %q : %v", p.Name, err) } - if s != "running" { + if s != state.Running { return fmt.Errorf("temporary error created container %q is not running yet", p.Name) } glog.Infof("the created container %q has a running status.", p.Name) @@ -313,7 +322,7 @@ func IsCreatedByMinikube(ociBinary string, nameOrID string) bool { // ListOwnedContainers lists all the containres that kic driver created on user's machine using a label func ListOwnedContainers(ociBinary string) ([]string, error) { - return listContainersByLabel(ociBinary, ProfileLabelKey) + return ListContainersByLabel(ociBinary, ProfileLabelKey) } // inspect return low-level information on containers @@ -443,8 +452,8 @@ func withPortMappings(portMappings []PortMapping) createOpt { } } -// listContainersByLabel returns all the container names with a specified label -func 
listContainersByLabel(ociBinary string, label string) ([]string, error) { +// ListContainersByLabel returns all the container names with a specified label +func ListContainersByLabel(ociBinary string, label string) ([]string, error) { stdout, err := WarnIfSlow(ociBinary, "ps", "-a", "--filter", fmt.Sprintf("label=%s", label), "--format", "{{.Names}}") if err != nil { return nil, err @@ -480,7 +489,51 @@ func PointToHostDockerDaemon() error { } // ContainerStatus returns status of a container running,exited,... -func ContainerStatus(ociBin string, name string) (string, error) { +func ContainerStatus(ociBin string, name string) (state.State, error) { out, err := WarnIfSlow(ociBin, "inspect", name, "--format={{.State.Status}}") - return strings.TrimSpace(string(out)), err + o := strings.TrimSpace(string(out)) + switch o { + case "running": + return state.Running, nil + case "exited": + return state.Stopped, nil + case "paused": + return state.Paused, nil + case "restarting": + return state.Starting, nil + case "dead": + return state.Error, nil + default: + return state.None, errors.Wrapf(err, "unknown state %q", name) + } +} + +// Shutdown will run command to shut down the container +// to ensure the containers process and networking bindings are all closed +// to avoid containers getting stuck before delete https://github.com/kubernetes/minikube/issues/7657 +func ShutDown(ociBin string, name string) error { + cmd := exec.Command(ociBin, "exec", "--privileged", "-t", name, "/bin/bash", "-c", "sudo init 0") + if out, err := cmd.CombinedOutput(); err != nil { + glog.Infof("error shutdown %s output %q : %v", name, out, err) + } + // helps with allowing docker realize the container is exited and report its status correctly. 
+ time.Sleep(time.Second * 1) + // wait till it is stoped + stopped := func() error { + st, err := ContainerStatus(ociBin, name) + if st == state.Stopped { + glog.Infof("container %s status is %s", name, st) + return nil + } + if err != nil { + glog.Infof("temporary error verifying shutdown: %v", err) + } + glog.Infof("temporary error: container %s status is %s but expect it to be exited", name, st) + return errors.Wrap(err, "couldn't verify cointainer is exited. %v") + } + if err := retry.Expo(stopped, time.Millisecond*500, time.Second*20); err != nil { + return errors.Wrap(err, "verify shutdown") + } + glog.Infof("Successfully shutdown container %s", name) + return nil } diff --git a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1alpha1.go b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1alpha1.go deleted file mode 100644 index a06d6cfb90..0000000000 --- a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1alpha1.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package ktmpl - -import "text/template" - -// V1Alpha1 is for Kubernetes v1.11 -var V1Alpha1 = template.Must(template.New("configTmpl-v1alpha1").Funcs(template.FuncMap{ - "printMapInOrder": printMapInOrder, -}).Parse(`apiVersion: kubeadm.k8s.io/v1alpha1 -kind: MasterConfiguration -{{if .NoTaintMaster}}noTaintMaster: true{{end}} -api: - advertiseAddress: {{.AdvertiseAddress}} - bindPort: {{.APIServerPort}} - controlPlaneEndpoint: {{.ControlPlaneAddress}} -kubernetesVersion: {{.KubernetesVersion}} -certificatesDir: {{.CertDir}} -networking: - serviceSubnet: {{.ServiceCIDR}} -etcd: - dataDir: {{.EtcdDataDir}} -nodeName: "{{.NodeName}}" -apiServerCertSANs: ["127.0.0.1", "localhost", "{{.AdvertiseAddress}}"] -{{if .ImageRepository}}imageRepository: {{.ImageRepository}} -{{end}}{{if .CRISocket}}criSocket: {{.CRISocket}} -{{end}}{{range .ComponentOptions}}{{.Component}}ExtraArgs:{{range $i, $val := printMapInOrder .ExtraArgs ": " }} - {{$val}}{{end}} -{{end}}{{if .FeatureArgs}}featureGates: {{range $i, $val := .FeatureArgs}} - {{$i}}: {{$val}}{{end}} -{{end}}`)) diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 46104c1d44..a740045d02 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -110,10 +110,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana opts.NoTaintMaster = true b := bytes.Buffer{} - configTmpl := ktmpl.V1Alpha1 - if version.GTE(semver.MustParse("1.12.0")) { - configTmpl = ktmpl.V1Alpha3 - } + configTmpl := ktmpl.V1Alpha3 // v1beta1 works in v1.13, but isn't required until v1.14. 
if version.GTE(semver.MustParse("1.14.0-alpha.0")) { configTmpl = ktmpl.V1Beta1 diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go index a2b53c3ea1..ec4910467b 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go @@ -70,7 +70,7 @@ func getExtraOptsPodCidr() []config.ExtraOption { func recentReleases() ([]string, error) { // test the 6 most recent releases - versions := []string{"v1.19", "v1.18", "v1.17", "v1.16", "v1.15", "v1.14", "v1.13", "v1.12", "v1.11"} + versions := []string{"v1.19", "v1.18", "v1.17", "v1.16", "v1.15", "v1.14", "v1.13", "v1.12"} foundNewest := false foundDefault := false diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go index 5b53632747..f65ac02716 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go @@ -55,7 +55,7 @@ Wants=docker.socket [Service] ExecStart= -ExecStart=/var/lib/minikube/binaries/v1.11.10/kubelet --allow-privileged=true --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cadvisor-port=0 --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests +ExecStart=/var/lib/minikube/binaries/v1.12.0/kubelet --allow-privileged=true --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cadvisor-port=0 --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube 
--kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests [Install] `, @@ -200,10 +200,7 @@ ExecStart=/var/lib/minikube/binaries/v1.18.1/kubelet --authorization-mode=Webhoo Context: 1, }) if err != nil { - t.Fatalf("diff error: %v", err) - } - if diff != "" { - t.Errorf("unexpected diff:\n%s", diff) + t.Fatalf("diff error: %v\n%s", err, diff) } }) } diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/api_server.go b/pkg/minikube/bootstrapper/bsutil/kverify/api_server.go index 6aa6763a26..146eabc7c6 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/api_server.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/api_server.go @@ -166,7 +166,15 @@ func APIServerStatus(cr command.Runner, hostname string, port int) (state.State, rr, err = cr.RunCmd(exec.Command("sudo", "cat", path.Join("/sys/fs/cgroup/freezer", fparts[2], "freezer.state"))) if err != nil { - glog.Errorf("unable to get freezer state: %s", rr.Stderr.String()) + // example error from github action: + // cat: /sys/fs/cgroup/freezer/actions_job/e62ef4349cc5a70f4b49f8a150ace391da6ad6df27073c83ecc03dbf81fde1ce/kubepods/burstable/poda1de58db0ce81d19df7999f6808def1b/5df53230fe3483fd65f341923f18a477fda92ae9cd71061168130ef164fe479c/freezer.state: No such file or directory\n"* + // TODO: #7770 investigate how to handle this error better. 
+ if strings.Contains(rr.Stderr.String(), "freezer.state: No such file or directory\n") { + glog.Infof("unable to get freezer state (might be okay and be related to #770): %s", rr.Stderr.String()) + } else { + glog.Warningf("unable to get freezer state : %s", rr.Stderr.String()) + } + return apiServerHealthz(hostname, port) } diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/default_sa.go b/pkg/minikube/bootstrapper/bsutil/kverify/default_sa.go index 453edff703..072446a254 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/default_sa.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/default_sa.go @@ -18,36 +18,36 @@ limitations under the License. package kverify import ( - "fmt" "time" "github.com/golang/glog" "github.com/pkg/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" - "k8s.io/minikube/pkg/util/retry" + kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants" ) // WaitForDefaultSA waits for the default service account to be created. 
func WaitForDefaultSA(cs *kubernetes.Clientset, timeout time.Duration) error { glog.Info("waiting for default service account to be created ...") start := time.Now() - saReady := func() error { + saReady := func() (bool, error) { // equivalent to manual check of 'kubectl --context profile get serviceaccount default' sas, err := cs.CoreV1().ServiceAccounts("default").List(meta.ListOptions{}) if err != nil { glog.Infof("temproary error waiting for default SA: %v", err) - return err + return false, nil } for _, sa := range sas.Items { if sa.Name == "default" { glog.Infof("found service account: %q", sa.Name) - return nil + return true, nil } } - return fmt.Errorf("couldn't find default service account") + return false, nil } - if err := retry.Expo(saReady, 500*time.Millisecond, timeout); err != nil { + if err := wait.PollImmediate(kconst.APICallRetryInterval, timeout, saReady); err != nil { return errors.Wrapf(err, "waited %s for SA", time.Since(start)) } diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go index f4486196c3..0bc234c701 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go @@ -32,7 +32,9 @@ const ( // DefaultSAWaitKey is the name used in the flags for default service account DefaultSAWaitKey = "default_sa" // AppsRunning is the name used in the flags for waiting for k8s-apps to be running - AppsRunning = "apps_running" + AppsRunningKey = "apps_running" + // NodeReadyKey is the name used in the flags for waiting for the node status to be ready + NodeReadyKey = "node_ready" ) // vars related to the --wait flag @@ -40,13 +42,13 @@ var ( // DefaultComponents is map of the the default components to wait for DefaultComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true} // NoWaitComponents is map of componets to wait for if specified 'none' or 'false' - NoComponents = map[string]bool{APIServerWaitKey: 
false, SystemPodsWaitKey: false, DefaultSAWaitKey: false, AppsRunning: false} + NoComponents = map[string]bool{APIServerWaitKey: false, SystemPodsWaitKey: false, DefaultSAWaitKey: false, AppsRunningKey: false, NodeReadyKey: false} // AllComponents is map for waiting for all components. - AllComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true, DefaultSAWaitKey: true, AppsRunning: true} + AllComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true, DefaultSAWaitKey: true, AppsRunningKey: true} // DefaultWaitList is list of all default components to wait for. only names to be used for start flags. DefaultWaitList = []string{APIServerWaitKey, SystemPodsWaitKey} // AllComponentsList list of all valid components keys to wait for. only names to be used used for start flags. - AllComponentsList = []string{APIServerWaitKey, SystemPodsWaitKey, DefaultSAWaitKey, AppsRunning} + AllComponentsList = []string{APIServerWaitKey, SystemPodsWaitKey, DefaultSAWaitKey, AppsRunningKey, NodeReadyKey} // AppsRunningList running list are valid k8s-app components to wait for them to be running AppsRunningList = []string{ "kube-dns", // coredns diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/node_conditions.go b/pkg/minikube/bootstrapper/bsutil/kverify/node_conditions.go new file mode 100644 index 0000000000..7e8e9a40a5 --- /dev/null +++ b/pkg/minikube/bootstrapper/bsutil/kverify/node_conditions.go @@ -0,0 +1,142 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package kverify verifies a running kubernetes cluster is healthy +package kverify + +import ( + "fmt" + "time" + + "github.com/golang/glog" + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +// NodeCondition represents a favorable or unfavorable node condition. +type NodeCondition struct { + Type v1.NodeConditionType + Status v1.ConditionStatus + Reason string + Message string +} + +// DiskPressure detects if the condition is disk pressure +func (pc *NodeCondition) DiskPressure() bool { + return pc.Type == v1.NodeDiskPressure && pc.Status == v1.ConditionTrue +} + +// MemoryPressure detects if the condition is memory pressure +func (pc *NodeCondition) MemoryPressure() bool { + return pc.Type == v1.NodeMemoryPressure && pc.Status == v1.ConditionTrue +} + +// PIDPressure detects if the condition is PID pressure +func (pc *NodeCondition) PIDPressure() bool { + return pc.Type == v1.NodePIDPressure && pc.Status == v1.ConditionTrue +} + +// NetworkUnavailable detects if the condition is PID pressure +func (pc *NodeCondition) NetworkUnavailable() bool { + return pc.Type == v1.NodeNetworkUnavailable && pc.Status == v1.ConditionTrue +} + +const errTextFormat = "node has unwanted condition %q : Reason %q Message: %q" + +// ErrMemoryPressure is thrown when there is node memory pressure condition +type ErrMemoryPressure struct { + NodeCondition +} + +func (e *ErrMemoryPressure) Error() string { + return fmt.Sprintf(errTextFormat, e.Type, e.Reason, e.Message) +} + +// ErrDiskPressure is thrown when there is node disk pressure condition +type ErrDiskPressure struct { + NodeCondition +} + +func (e *ErrDiskPressure) Error() string { + return fmt.Sprintf(errTextFormat, e.Type, e.Reason, e.Message) +} + +// ErrPIDPressure is thrown when there is node PID pressure condition +type 
ErrPIDPressure struct { + NodeCondition +} + +func (e *ErrPIDPressure) Error() string { + return fmt.Sprintf(errTextFormat, e.Type, e.Reason, e.Message) +} + +// ErrNetworkNotReady is thrown when there is node condition is network not ready +type ErrNetworkNotReady struct { + NodeCondition +} + +func (e *ErrNetworkNotReady) Error() string { + return fmt.Sprintf(errTextFormat, e.Type, e.Reason, e.Message) +} + +// NodePressure verfies that node is not under disk, memory, pid or network pressure. +func NodePressure(cs *kubernetes.Clientset) error { + glog.Info("verifying NodePressure condition ...") + start := time.Now() + defer func() { + glog.Infof("duration metric: took %s to run NodePressure ...", time.Since(start)) + }() + + ns, err := cs.CoreV1().Nodes().List(meta.ListOptions{}) + if err != nil { + return errors.Wrap(err, "list nodes") + } + + for _, n := range ns.Items { + glog.Infof("node storage ephemeral capacity is %s", n.Status.Capacity.StorageEphemeral()) + glog.Infof("node cpu capacity is %s", n.Status.Capacity.Cpu().AsDec()) + for _, c := range n.Status.Conditions { + pc := NodeCondition{Type: c.Type, Status: c.Status, Reason: c.Reason, Message: c.Message} + if pc.DiskPressure() { + return &ErrDiskPressure{ + NodeCondition: pc, + } + } + + if pc.MemoryPressure() { + return &ErrMemoryPressure{ + NodeCondition: pc, + } + } + + if pc.PIDPressure() { + return &ErrPIDPressure{ + NodeCondition: pc, + } + } + + if pc.NetworkUnavailable() { + return &ErrNetworkNotReady{ + NodeCondition: pc, + } + } + + } + } + return nil +} diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/node_ready.go b/pkg/minikube/bootstrapper/bsutil/kverify/node_ready.go new file mode 100644 index 0000000000..a9c1879b6a --- /dev/null +++ b/pkg/minikube/bootstrapper/bsutil/kverify/node_ready.go @@ -0,0 +1,64 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package kverify verifies a running kubernetes cluster is healthy +package kverify + +import ( + "fmt" + "time" + + "github.com/golang/glog" + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants" +) + +// WaitForNodeReady waits till kube client reports node status as "ready" +func WaitForNodeReady(cs *kubernetes.Clientset, timeout time.Duration) error { + glog.Info("waiting for node status to be ready ...") + start := time.Now() + defer func() { + glog.Infof("duration metric: took %s to wait for WaitForNodeReady...", time.Since(start)) + }() + checkReady := func() (bool, error) { + if time.Since(start) > timeout { + return false, fmt.Errorf("wait for node to be ready timed out") + } + ns, err := cs.CoreV1().Nodes().List(meta.ListOptions{}) + if err != nil { + glog.Infof("error listing nodes will retry: %v", err) + return false, nil + } + + for _, n := range ns.Items { + for _, c := range n.Status.Conditions { + if c.Type == v1.NodeReady && c.Status != v1.ConditionTrue { + glog.Infof("node %q has unwanted condition %q : Reason %q Message: %q. will try. 
", n.Name, c.Type, c.Reason, c.Message) + return false, nil + } + } + } + return true, nil + } + if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, checkReady); err != nil { + return errors.Wrapf(err, "wait node ready") + } + return nil +} diff --git a/pkg/minikube/bootstrapper/images/kubeadm_test.go b/pkg/minikube/bootstrapper/images/kubeadm_test.go index d705b5e74c..00fc2673aa 100644 --- a/pkg/minikube/bootstrapper/images/kubeadm_test.go +++ b/pkg/minikube/bootstrapper/images/kubeadm_test.go @@ -101,18 +101,6 @@ func TestKubeadmImages(t *testing.T) { "kubernetesui/dashboard:v2.0.0-rc6", "kubernetesui/metrics-scraper:v1.0.2", }}, - {"v1.11.10", "", []string{ - "k8s.gcr.io/kube-proxy-amd64:v1.11.10", - "k8s.gcr.io/kube-scheduler-amd64:v1.11.10", - "k8s.gcr.io/kube-controller-manager-amd64:v1.11.10", - "k8s.gcr.io/kube-apiserver-amd64:v1.11.10", - "k8s.gcr.io/coredns:1.1.3", - "k8s.gcr.io/etcd-amd64:3.2.18", - "k8s.gcr.io/pause:3.1", - "gcr.io/k8s-minikube/storage-provisioner:v1.8.1", - "kubernetesui/dashboard:v2.0.0-rc6", - "kubernetesui/metrics-scraper:v1.0.2", - }}, } for _, tc := range tests { got, err := Kubeadm(tc.mirror, tc.version) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 3be149ae2f..d41ba6d248 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -21,6 +21,7 @@ import ( "context" "os/exec" "path" + "runtime" "sync" "fmt" @@ -37,9 +38,11 @@ import ( "github.com/docker/machine/libmachine/state" "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/minikube/pkg/drivers/kic" + "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/kapi" "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/bootstrapper" @@ -174,14 +177,27 @@ func (k *Bootstrapper) 
init(cfg config.ClusterConfig) error { "FileAvailable--etc-kubernetes-manifests-etcd.yaml", "Port-10250", // For "none" users who already have a kubelet online "Swap", // For "none" users who have swap configured - "SystemVerification", } ignore = append(ignore, bsutil.SkipAdditionalPreflights[r.Name()]...) + skipSystemVerification := false // Allow older kubeadm versions to function with newer Docker releases. + if version.LT(semver.MustParse("1.13.0")) { + glog.Infof("ignoring SystemVerification for kubeadm because of old kubernetes version %v", version) + skipSystemVerification = true + } + if driver.BareMetal(cfg.Driver) && r.Name() == "Docker" { + if v, err := r.Version(); err == nil && strings.Contains(v, "azure") { + glog.Infof("ignoring SystemVerification for kubeadm because of unknown docker version %s", v) + skipSystemVerification = true + } + } // For kic on linux example error: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.2.17-1rodete3-amd64" - if version.LT(semver.MustParse("1.13.0")) || driver.IsKIC(cfg.Driver) { - glog.Info("ignoring SystemVerification for kubeadm because of either driver or kubernetes version") + if driver.IsKIC(cfg.Driver) { + glog.Infof("ignoring SystemVerification for kubeadm because of %s driver", cfg.Driver) + skipSystemVerification = true + } + if skipSystemVerification { ignore = append(ignore, "SystemVerification") } @@ -202,9 +218,13 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error { } var wg sync.WaitGroup - wg.Add(4) + wg.Add(3) go func() { + // we need to have cluster role binding before applying overlay to avoid #7428 + if err := k.elevateKubeSystemPrivileges(cfg); err != nil { + glog.Errorf("unable to create cluster role binding, some addons might not work: %v", err) + } // the overlay is required for containerd and cri-o runtime: see #7428 if driver.IsKIC(cfg.Driver) && cfg.KubernetesConfig.ContainerRuntime != "docker" { if err := k.applyKICOverlay(cfg); err != nil { @@ -228,13 
+248,6 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error { wg.Done() }() - go func() { - if err := k.elevateKubeSystemPrivileges(cfg); err != nil { - glog.Warningf("unable to create cluster role binding, some addons might not work: %v", err) - } - wg.Done() - }() - wg.Wait() return nil } @@ -316,7 +329,7 @@ func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error endpoint := fmt.Sprintf("https://%s", net.JoinHostPort(ip, strconv.Itoa(port))) if cc.Host != endpoint { - glog.Errorf("Overriding stale ClientConfig host %s with %s", cc.Host, endpoint) + glog.Warningf("Overriding stale ClientConfig host %s with %s", cc.Host, endpoint) cc.Host = endpoint } c, err := kubernetes.NewForConfig(cc) @@ -327,16 +340,36 @@ func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error } // WaitForNode blocks until the node appears to be healthy -func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, timeout time.Duration) error { +func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, timeout time.Duration) (waitErr error) { start := time.Now() if !n.ControlPlane { glog.Infof("%s is not a control plane, nothing to wait for", n.Name) return nil } + + out.T(out.HealthCheck, "Verifying Kubernetes components...") + + // TODO: #7706: for better performance we could use k.client inside minikube to avoid asking for external IP:PORT + hostname, _, port, err := driver.ControlPaneEndpoint(&cfg, &n, cfg.Driver) + if err != nil { + return errors.Wrap(err, "get control plane endpoint") + } + + defer func() { // run pressure verification after all other checks, so there be an api server to talk to. 
+ client, err := k.client(hostname, port) + if err != nil { + waitErr = errors.Wrap(err, "get k8s client") + } + if err := kverify.NodePressure(client); err != nil { + adviseNodePressure(err, cfg.Name, cfg.Driver) + waitErr = errors.Wrap(err, "node pressure") + } + }() + if !kverify.ShouldWait(cfg.VerifyComponents) { glog.Infof("skip waiting for components based on config.") - return nil + return waitErr } cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c}) @@ -344,11 +377,6 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time return errors.Wrapf(err, "create runtme-manager %s", cfg.KubernetesConfig.ContainerRuntime) } - hostname, _, port, err := driver.ControlPaneEndpoint(&cfg, &n, cfg.Driver) - if err != nil { - return errors.Wrap(err, "get control plane endpoint") - } - if cfg.VerifyComponents[kverify.APIServerWaitKey] { client, err := k.client(hostname, port) if err != nil { @@ -383,7 +411,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time } } - if cfg.VerifyComponents[kverify.AppsRunning] { + if cfg.VerifyComponents[kverify.AppsRunningKey] { client, err := k.client(hostname, port) if err != nil { return errors.Wrap(err, "get k8s client") @@ -393,8 +421,18 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time } } + if cfg.VerifyComponents[kverify.NodeReadyKey] { + client, err := k.client(hostname, port) + if err != nil { + return errors.Wrap(err, "get k8s client") + } + if err := kverify.WaitForNodeReady(client, timeout); err != nil { + return errors.Wrap(err, "waiting for node to be ready") + } + } + glog.Infof("duration metric: took %s to wait for : %+v ...", time.Since(start), cfg.VerifyComponents) - return nil + return waitErr } // needsReset returns whether or not the cluster needs to be reconfigured @@ -424,7 +462,8 @@ func (k *Bootstrapper) needsReset(conf string, hostname string, port int, client 
glog.Infof("needs reset: %v", err) return true } - + // to be used in the ingeration test to verify it wont reset. + glog.Infof("The running cluster does not need a reset. hostname: %s", hostname) return false } @@ -518,12 +557,16 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { return errors.Wrap(err, "system pods") } + if err := kverify.NodePressure(client); err != nil { + adviseNodePressure(err, cfg.Name, cfg.Driver) + } + // This can fail during upgrades if the old pods have not shut down yet addonPhase := func() error { _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, conf))) return err } - if err = retry.Expo(addonPhase, 1*time.Second, 30*time.Second); err != nil { + if err = retry.Expo(addonPhase, 100*time.Microsecond, 30*time.Second); err != nil { glog.Warningf("addon install failed, wil retry: %v", err) return errors.Wrap(err, "addons") } @@ -573,14 +616,24 @@ func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) { // DeleteCluster removes the components that were started earlier func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error { + cr, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: k.c, Socket: k8s.CRISocket}) + if err != nil { + return errors.Wrap(err, "runtime") + } + version, err := util.ParseKubernetesVersion(k8s.KubernetesVersion) if err != nil { return errors.Wrap(err, "parsing kubernetes version") } - cmd := fmt.Sprintf("%s reset --force", bsutil.InvokeKubeadm(k8s.KubernetesVersion)) + ka := bsutil.InvokeKubeadm(k8s.KubernetesVersion) + sp := cr.SocketPath() + if sp == "" { + sp = kconst.DefaultDockerCRISocket + } + cmd := fmt.Sprintf("%s reset --cri-socket %s --force", ka, sp) if version.LT(semver.MustParse("1.11.0")) { - cmd = fmt.Sprintf("%s reset", bsutil.InvokeKubeadm(k8s.KubernetesVersion)) + cmd = fmt.Sprintf("%s reset --cri-socket %s", ka, sp) } rr, derr := 
k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd)) @@ -592,11 +645,6 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error { glog.Warningf("stop kubelet: %v", err) } - cr, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: k.c, Socket: k8s.CRISocket}) - if err != nil { - return errors.Wrap(err, "runtime") - } - containers, err := cr.ListContainers(cruntime.ListOptions{Namespaces: []string{"kube-system"}}) if err != nil { glog.Warningf("unable to list kube-system containers: %v", err) @@ -750,6 +798,9 @@ func startKubeletIfRequired(runner command.Runner, sm sysinit.Manager) error { return errors.Wrap(err, "starting kubelet") } + if err := sm.Enable("kubelet"); err != nil { + return err + } return sm.Start("kubelet") } @@ -820,6 +871,10 @@ func (k *Bootstrapper) applyNodeLabels(cfg config.ClusterConfig) error { // elevateKubeSystemPrivileges gives the kube-system service account cluster admin privileges to work with RBAC. func (k *Bootstrapper) elevateKubeSystemPrivileges(cfg config.ClusterConfig) error { start := time.Now() + defer func() { + glog.Infof("duration metric: took %s to wait for elevateKubeSystemPrivileges.", time.Since(start)) + }() + // Allow no more than 5 seconds for creating cluster role bindings ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -836,6 +891,84 @@ func (k *Bootstrapper) elevateKubeSystemPrivileges(cfg config.ClusterConfig) err return nil } } - glog.Infof("duration metric: took %s to wait for elevateKubeSystemPrivileges.", time.Since(start)) - return err + + if cfg.VerifyComponents[kverify.DefaultSAWaitKey] { + // double checking defalut sa was created. + // good for ensuring using minikube in CI is robust. 
+ checkSA := func() (bool, error) { + cmd = exec.Command("sudo", kubectlPath(cfg), + "get", "sa", "default", fmt.Sprintf("--kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "kubeconfig"))) + rr, err = k.c.RunCmd(cmd) + if err != nil { + return false, nil + } + return true, nil + } + + // retry up to make sure SA is created + if err := wait.PollImmediate(kconst.APICallRetryInterval, time.Minute, checkSA); err != nil { + return errors.Wrap(err, "ensure sa was created") + } + } + return nil +} + +// adviseNodePressure will advise the user what to do with difference pressure errors based on their environment +func adviseNodePressure(err error, name string, drv string) { + if diskErr, ok := err.(*kverify.ErrDiskPressure); ok { + out.ErrLn("") + glog.Warning(diskErr) + out.WarningT("The node {{.name}} has ran out of disk space.", out.V{"name": name}) + // generic advice for all drivers + out.T(out.Tip, "Please free up disk or prune images.") + if driver.IsVM(drv) { + out.T(out.Stopped, "Please create a cluster with bigger disk size: `minikube start --disk SIZE_MB` ") + } else if drv == oci.Docker && runtime.GOOS != "linux" { + out.T(out.Stopped, "Please increse Desktop's disk size.") + if runtime.GOOS == "darwin" { + out.T(out.Documentation, "Documentation: {{.url}}", out.V{"url": "https://docs.docker.com/docker-for-mac/space/"}) + } + if runtime.GOOS == "windows" { + out.T(out.Documentation, "Documentation: {{.url}}", out.V{"url": "https://docs.docker.com/docker-for-windows/"}) + } + } + out.ErrLn("") + return + } + + if memErr, ok := err.(*kverify.ErrMemoryPressure); ok { + out.ErrLn("") + glog.Warning(memErr) + out.WarningT("The node {{.name}} has ran out of memory.", out.V{"name": name}) + out.T(out.Tip, "Check if you have unnecessary pods running by running 'kubectl get po -A") + if driver.IsVM(drv) { + out.T(out.Stopped, "Consider creating a cluster with larger memory size using `minikube start --memory SIZE_MB` ") + } else if drv == oci.Docker && runtime.GOOS 
!= "linux" { + out.T(out.Stopped, "Consider increasing Docker Desktop's memory size.") + if runtime.GOOS == "darwin" { + out.T(out.Documentation, "Documentation: {{.url}}", out.V{"url": "https://docs.docker.com/docker-for-mac/space/"}) + } + if runtime.GOOS == "windows" { + out.T(out.Documentation, "Documentation: {{.url}}", out.V{"url": "https://docs.docker.com/docker-for-windows/"}) + } + } + out.ErrLn("") + return + } + + if pidErr, ok := err.(*kverify.ErrPIDPressure); ok { + glog.Warning(pidErr) + out.ErrLn("") + out.WarningT("The node {{.name}} has ran out of available PIDs.", out.V{"name": name}) + out.ErrLn("") + return + } + + if netErr, ok := err.(*kverify.ErrNetworkNotReady); ok { + glog.Warning(netErr) + out.ErrLn("") + out.WarningT("The node {{.name}} network is not available. Please verify network settings.", out.V{"name": name}) + out.ErrLn("") + return + } } diff --git a/pkg/minikube/config/profile.go b/pkg/minikube/config/profile.go index bfb6298c71..8345341c51 100644 --- a/pkg/minikube/config/profile.go +++ b/pkg/minikube/config/profile.go @@ -21,6 +21,7 @@ import ( "io/ioutil" "os" "path/filepath" + "regexp" "strings" "github.com/golang/glog" @@ -83,6 +84,16 @@ func PrimaryControlPlane(cc *ClusterConfig) (Node, error) { return cp, nil } +// ProfileNameValid checks if the profile name is container name friendly +func ProfileNameValid(name string) bool { + + // RestrictedNameChars collects the characters allowed to represent a name + const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` + + var validName = regexp.MustCompile(`^` + RestrictedNameChars + `+$`) + return validName.MatchString(name) +} + // ProfileNameInReservedKeywords checks if the profile is an internal keywords func ProfileNameInReservedKeywords(name string) bool { for _, v := range keywords { diff --git a/pkg/minikube/config/profile_test.go b/pkg/minikube/config/profile_test.go index 805123ed19..92e580f9f2 100644 --- a/pkg/minikube/config/profile_test.go +++ 
b/pkg/minikube/config/profile_test.go @@ -72,6 +72,27 @@ func TestListProfiles(t *testing.T) { } } +func TestProfileNameValid(t *testing.T) { + var testCases = []struct { + name string + expected bool + }{ + {"meaningful_name", true}, + {"meaningful_name@", false}, + {"n_a_m_e_2", true}, + {"n", false}, + {"_name", false}, + {"N__a.M--E12567", true}, + } + for _, tt := range testCases { + got := ProfileNameValid(tt.name) + if got != tt.expected { + t.Errorf("expected ProfileNameValid(%s)=%t but got %t ", tt.name, tt.expected, got) + } + } + +} + func TestProfileNameInReservedKeywords(t *testing.T) { var testCases = []struct { name string diff --git a/pkg/minikube/constants/constants.go b/pkg/minikube/constants/constants.go index 773e70a347..d952d16c69 100644 --- a/pkg/minikube/constants/constants.go +++ b/pkg/minikube/constants/constants.go @@ -31,7 +31,7 @@ const ( // NewestKubernetesVersion is the newest Kubernetes version to test against NewestKubernetesVersion = "v1.18.1" // OldestKubernetesVersion is the oldest Kubernetes version to test against - OldestKubernetesVersion = "v1.11.10" + OldestKubernetesVersion = "v1.12.0" // DefaultClusterName is the default nane for the k8s cluster DefaultClusterName = "minikube" // DockerDaemonPort is the port Docker daemon listening inside a minikube node (vm or container). 
@@ -40,6 +40,8 @@ const ( APIServerPort = 8443 // SSHPort is the SSH serviceport on the node vm and container SSHPort = 22 + // RegistryAddonPort os the default registry addon port + RegistryAddonPort = 5000 // APIServerName is the default API server name APIServerName = "minikubeCA" diff --git a/pkg/minikube/cruntime/containerd.go b/pkg/minikube/cruntime/containerd.go index 8eadb57392..73261c9819 100644 --- a/pkg/minikube/cruntime/containerd.go +++ b/pkg/minikube/cruntime/containerd.go @@ -19,16 +19,20 @@ package cruntime import ( "bytes" "encoding/base64" + "encoding/json" "fmt" "os/exec" "path" "strings" "text/template" + "time" "github.com/blang/semver" "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/bootstrapper/images" + "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/out" @@ -310,5 +314,118 @@ func (r *Containerd) Preload(cfg config.KubernetesConfig) error { if !download.PreloadExists(cfg.KubernetesVersion, cfg.ContainerRuntime) { return nil } - return fmt.Errorf("not yet implemented for %s", r.Name()) + + k8sVersion := cfg.KubernetesVersion + cRuntime := cfg.ContainerRuntime + + // If images already exist, return + images, err := images.Kubeadm(cfg.ImageRepository, k8sVersion) + if err != nil { + return errors.Wrap(err, "getting images") + } + if containerdImagesPreloaded(r.Runner, images) { + glog.Info("Images already preloaded, skipping extraction") + return nil + } + + tarballPath := download.TarballPath(k8sVersion, cRuntime) + targetDir := "/" + targetName := "preloaded.tar.lz4" + dest := path.Join(targetDir, targetName) + + c := exec.Command("which", "lz4") + if _, err := r.Runner.RunCmd(c); err != nil { + return NewErrISOFeature("lz4") + } + + // Copy over tarball into host + fa, err := assets.NewFileAsset(tarballPath, targetDir, targetName, "0644") + if err != nil { + return 
errors.Wrap(err, "getting file asset") + } + t := time.Now() + if err := r.Runner.Copy(fa); err != nil { + return errors.Wrap(err, "copying file") + } + glog.Infof("Took %f seconds to copy over tarball", time.Since(t).Seconds()) + + t = time.Now() + // extract the tarball to /var in the VM + if rr, err := r.Runner.RunCmd(exec.Command("sudo", "tar", "-I", "lz4", "-C", "/var", "-xvf", dest)); err != nil { + return errors.Wrapf(err, "extracting tarball: %s", rr.Output()) + } + glog.Infof("Took %f seconds t extract the tarball", time.Since(t).Seconds()) + + // remove the tarball in the VM + if err := r.Runner.Remove(fa); err != nil { + glog.Infof("error removing tarball: %v", err) + } + + return r.Restart() +} + +// Restart restarts Docker on a host +func (r *Containerd) Restart() error { + return r.Init.Restart("containerd") +} + +// containerdImagesPreloaded returns true if all images have been preloaded +func containerdImagesPreloaded(runner command.Runner, images []string) bool { + rr, err := runner.RunCmd(exec.Command("sudo", "crictl", "images", "--output", "json")) + if err != nil { + return false + } + type containerdImages struct { + Images []struct { + ID string `json:"id"` + RepoTags []string `json:"repoTags"` + RepoDigests []string `json:"repoDigests"` + Size string `json:"size"` + UID interface{} `json:"uid"` + Username string `json:"username"` + } `json:"images"` + } + + var jsonImages containerdImages + err = json.Unmarshal(rr.Stdout.Bytes(), &jsonImages) + if err != nil { + glog.Errorf("failed to unmarshal images, will assume images are not preloaded") + return false + } + + // Make sure images == imgs + for _, i := range images { + found := false + for _, ji := range jsonImages.Images { + for _, rt := range ji.RepoTags { + i = addRepoTagToImageName(i) + if i == rt { + found = true + break + } + } + if found { + break + } + + } + if !found { + glog.Infof("couldn't find preloaded image for %q. 
assuming images are not preloaded.", i) + return false + } + } + glog.Infof("all images are preloaded for containerd runtime.") + return true +} + +// addRepoTagToImageName makes sure the image name has a repo tag in it. +// in crictl images list have the repo tag prepended to them +// for example "kubernetesui/dashboard:v2.0.0 will show up as "docker.io/kubernetesui/dashboard:v2.0.0" +// warning this is only meant for kuberentes images where we know the GCR addreses have .io in them +// not mean to be used for public images +func addRepoTagToImageName(imgName string) string { + if !strings.Contains(imgName, ".io/") { + return "docker.io/" + imgName + } // else it already has repo name dont add anything + return imgName } diff --git a/pkg/minikube/cruntime/containerd_test.go b/pkg/minikube/cruntime/containerd_test.go new file mode 100644 index 0000000000..cfb1e55f67 --- /dev/null +++ b/pkg/minikube/cruntime/containerd_test.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cruntime + +import ( + "testing" +) + +func TestAddRepoTagToImageName(t *testing.T) { + var tests = []struct { + imgName string + want string + }{ + {"kubernetesui/dashboard:v2.0.0-rc6", "docker.io/kubernetesui/dashboard:v2.0.0-rc6"}, + {"kubernetesui/metrics-scraper:v1.0.2", "docker.io/kubernetesui/metrics-scraper:v1.0.2"}, + {"gcr.io/k8s-minikube/storage-provisioner:v1.8.1", "gcr.io/k8s-minikube/storage-provisioner:v1.8.1"}, + } + for _, tc := range tests { + t.Run(tc.imgName, func(t *testing.T) { + got := addRepoTagToImageName(tc.imgName) + if got != tc.want { + t.Errorf("expected image name to be: %q but got %q", tc.want, got) + } + }) + } +} diff --git a/pkg/minikube/cruntime/cruntime.go b/pkg/minikube/cruntime/cruntime.go index bbb410be13..f1866ff1aa 100644 --- a/pkg/minikube/cruntime/cruntime.go +++ b/pkg/minikube/cruntime/cruntime.go @@ -220,3 +220,14 @@ func enableIPForwarding(cr CommandRunner) error { } return nil } + +// ImagesPreloaded returns true if all images have been preloaded +func ImagesPreloaded(containerRuntime string, runner command.Runner, images []string) bool { + if containerRuntime == "docker" { + return dockerImagesPreloaded(runner, images) + } + if containerRuntime == "containerd" { + return containerdImagesPreloaded(runner, images) + } + return false +} diff --git a/pkg/minikube/cruntime/docker.go b/pkg/minikube/cruntime/docker.go index 95f8554152..eb11b58e83 100644 --- a/pkg/minikube/cruntime/docker.go +++ b/pkg/minikube/cruntime/docker.go @@ -290,7 +290,7 @@ func (r *Docker) Preload(cfg config.KubernetesConfig) error { if err != nil { return errors.Wrap(err, "getting images") } - if DockerImagesPreloaded(r.Runner, images) { + if dockerImagesPreloaded(r.Runner, images) { glog.Info("Images already preloaded, skipping extraction") return nil } @@ -342,8 +342,8 @@ func (r *Docker) Preload(cfg config.KubernetesConfig) error { return r.Restart() } -// DockerImagesPreloaded returns true if all images have been preloaded -func 
DockerImagesPreloaded(runner command.Runner, images []string) bool { +// dockerImagesPreloaded returns true if all images have been preloaded +func dockerImagesPreloaded(runner command.Runner, images []string) bool { rr, err := runner.RunCmd(exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}")) if err != nil { return false diff --git a/pkg/minikube/download/binary_test.go b/pkg/minikube/download/binary_test.go index 79c23131c2..5a481405e2 100644 --- a/pkg/minikube/download/binary_test.go +++ b/pkg/minikube/download/binary_test.go @@ -31,12 +31,24 @@ func TestCacheBinary(t *testing.T) { if err != nil { t.Fatalf("error during creating tmp dir: %v", err) } + defer func() { //clean up tempdir + err := os.RemoveAll(minikubeHome) + if err != nil { + t.Errorf("failed to clean up temp folder %q", minikubeHome) + } + }() + defer os.RemoveAll(minikubeHome) noWritePermDir, err := ioutil.TempDir("/tmp", "") if err != nil { t.Fatalf("error during creating tmp dir: %v", err) } - defer os.RemoveAll(noWritePermDir) + defer func() { //clean up tempdir + err := os.RemoveAll(noWritePermDir) + if err != nil { + t.Errorf("failed to clean up temp folder %q", noWritePermDir) + } + }() err = os.Chmod(noWritePermDir, 0000) if err != nil { t.Fatalf("error (%v) during changing permissions of dir %v", err, noWritePermDir) diff --git a/pkg/minikube/download/preload.go b/pkg/minikube/download/preload.go index 59a0c4b7e0..50a12a37bb 100644 --- a/pkg/minikube/download/preload.go +++ b/pkg/minikube/download/preload.go @@ -78,16 +78,16 @@ func remoteTarballURL(k8sVersion, containerRuntime string) string { // PreloadExists returns true if there is a preloaded tarball that can be used func PreloadExists(k8sVersion, containerRuntime string) bool { + // TODO: debug why this func is being called two times glog.Infof("Checking if preload exists for k8s version %s and runtime %s", k8sVersion, containerRuntime) if !viper.GetBool("preload") { return false } - // See 
https://github.com/kubernetes/minikube/issues/6933 // and https://github.com/kubernetes/minikube/issues/6934 - // to track status of adding containerd & crio - if containerRuntime != "docker" { - glog.Info("Container runtime isn't docker, skipping preload") + // to track status of adding crio + if containerRuntime == "crio" { + glog.Info("crio is not supported yet, skipping preload") return false } diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index 77b44e15ef..a55f7de441 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -140,6 +140,16 @@ func HasResourceLimits(name string) bool { return !(name == None || name == Podman) } +// NeedsShutdown returns true if driver needs manual shutdown command before stopping. +// Hyper-V requires special care to avoid ACPI and file locking issues +// KIC also needs shutdown to avoid container getting stuck, https://github.com/kubernetes/minikube/issues/7657 +func NeedsShutdown(name string) bool { + if name == HyperV || IsKIC(name) { + return true + } + return false +} + // FlagHints are hints for what default options should be used for this driver type FlagHints struct { ExtraOptions []string diff --git a/pkg/minikube/extract/extract.go b/pkg/minikube/extract/extract.go index 584acd827e..464cfe66a8 100644 --- a/pkg/minikube/extract/extract.go +++ b/pkg/minikube/extract/extract.go @@ -454,7 +454,7 @@ func writeStringsToFiles(e *state, output string) error { if !strings.HasSuffix(path, ".json") { return nil } - fmt.Printf("Writing to %s\n", filepath.Base(path)) + fmt.Printf("Writing to %s", filepath.Base(path)) currentTranslations := make(map[string]interface{}) f, err := ioutil.ReadFile(path) if err != nil { @@ -482,6 +482,16 @@ func writeStringsToFiles(e *state, output string) error { } } + t := 0 // translated + u := 0 // untranslated + for k := range e.translations { + if currentTranslations[k] != "" { + t++ + } else { + u++ + } + } + c, err := 
json.MarshalIndent(currentTranslations, "", "\t") if err != nil { return errors.Wrap(err, "marshalling translations") @@ -490,10 +500,26 @@ func writeStringsToFiles(e *state, output string) error { if err != nil { return errors.Wrap(err, "writing translation file") } + + fmt.Printf(" (%d translated, %d untranslated)\n", t, u) return nil }) - return err + if err != nil { + return err + } + + c, err := json.MarshalIndent(e.translations, "", "\t") + if err != nil { + return errors.Wrap(err, "marshalling translations") + } + path := filepath.Join(output, "strings.txt") + err = lock.WriteFile(path, c, 0644) + if err != nil { + return errors.Wrap(err, "writing translation file") + } + + return nil } // addParentFuncToList adds the current parent function to the list of functions to inspect more closely. diff --git a/pkg/minikube/extract/extract_test.go b/pkg/minikube/extract/extract_test.go index c0ac9914f9..352151dc2f 100644 --- a/pkg/minikube/extract/extract_test.go +++ b/pkg/minikube/extract/extract_test.go @@ -36,7 +36,12 @@ func TestExtract(t *testing.T) { if err != nil { t.Fatalf("Creating temp dir: %v", err) } - defer os.RemoveAll(tempdir) + defer func() { //clean up tempdir + err := os.RemoveAll(tempdir) + if err != nil { + t.Errorf("failed to clean up temp folder %q", tempdir) + } + }() src, err := ioutil.ReadFile("testdata/test.json") if err != nil { diff --git a/pkg/minikube/image/image.go b/pkg/minikube/image/image.go index 0c77c20c80..05904d9e93 100644 --- a/pkg/minikube/image/image.go +++ b/pkg/minikube/image/image.go @@ -79,23 +79,28 @@ func DigestByGoLib(imgName string) string { return cf.Hex } -// WriteImageToDaemon write img to the local docker daemon -func WriteImageToDaemon(img string) error { - glog.Infof("Writing %s to local daemon", img) - +// ExistsImageInDaemon if img exist in local docker daemon +func ExistsImageInDaemon(img string) bool { // Check if image exists locally cmd := exec.Command("docker", "images", "--format", 
"{{.Repository}}:{{.Tag}}@{{.Digest}}") if output, err := cmd.Output(); err == nil { if strings.Contains(string(output), img) { glog.Infof("Found %s in local docker daemon, skipping pull", img) - return nil + return true } } // Else, pull it + return false +} + +// WriteImageToDaemon write img to the local docker daemon +func WriteImageToDaemon(img string) error { + glog.Infof("Writing %s to local daemon", img) ref, err := name.ParseReference(img) if err != nil { return errors.Wrap(err, "parsing reference") } + glog.V(3).Infof("Getting image %v", ref) i, err := remote.Image(ref) if err != nil { return errors.Wrap(err, "getting remote image") @@ -104,8 +109,26 @@ func WriteImageToDaemon(img string) error { if err != nil { return errors.Wrap(err, "getting tag") } + glog.V(3).Infof("Writing image %v", tag) _, err = daemon.Write(tag, i) - return err + if err != nil { + return errors.Wrap(err, "writing image") + } + + //TODO: Make pkg/v1/daemon accept Ref too + // Only added it to pkg/v1/tarball + // + // https://github.com/google/go-containerregistry/pull/702 + + glog.V(3).Infof("Pulling image %v", ref) + + // Pull digest + cmd := exec.Command("docker", "pull", "--quiet", img) + if _, err := cmd.Output(); err != nil { + return errors.Wrap(err, "pulling remote image") + } + + return nil } func retrieveImage(ref name.Reference) (v1.Image, error) { diff --git a/pkg/minikube/kubeconfig/kubeconfig.go b/pkg/minikube/kubeconfig/kubeconfig.go index b6ecf1d4a0..566353eb32 100644 --- a/pkg/minikube/kubeconfig/kubeconfig.go +++ b/pkg/minikube/kubeconfig/kubeconfig.go @@ -21,6 +21,7 @@ import ( "io/ioutil" "net/url" "os" + "path" "path/filepath" "strconv" @@ -30,6 +31,7 @@ import ( "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/tools/clientcmd/api/latest" "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/localpath" pkgutil "k8s.io/minikube/pkg/util" "k8s.io/minikube/pkg/util/lock" ) @@ -103,24 +105,43 @@ func Endpoint(contextName string, configPath 
...string) (string, int, error) { } // UpdateEndpoint overwrites the IP stored in kubeconfig with the provided IP. -func UpdateEndpoint(contextName string, hostname string, port int, path string) (bool, error) { +func UpdateEndpoint(contextName string, hostname string, port int, confpath string) (bool, error) { if hostname == "" { return false, fmt.Errorf("empty ip") } - err := VerifyEndpoint(contextName, hostname, port, path) + err := VerifyEndpoint(contextName, hostname, port, confpath) if err == nil { return false, nil } glog.Infof("verify returned: %v", err) - cfg, err := readOrNew(path) + cfg, err := readOrNew(confpath) if err != nil { return false, errors.Wrap(err, "read") } - cfg.Clusters[contextName].Server = "https://" + hostname + ":" + strconv.Itoa(port) - err = writeToFile(cfg, path) + address := "https://" + hostname + ":" + strconv.Itoa(port) + + // if the kubeconfig is missed, create new one + if len(cfg.Clusters) == 0 { + lp := localpath.Profile(contextName) + gp := localpath.MiniPath() + kcs := &Settings{ + ClusterName: contextName, + ClusterServerAddress: address, + ClientCertificate: path.Join(lp, "client.crt"), + ClientKey: path.Join(lp, "client.key"), + CertificateAuthority: path.Join(gp, "ca.crt"), + KeepContext: false, + } + err = PopulateFromSettings(kcs, cfg) + if err != nil { + return false, errors.Wrap(err, "populating kubeconfig") + } + } + cfg.Clusters[contextName].Server = address + err = writeToFile(cfg, confpath) if err != nil { return false, errors.Wrap(err, "write") } diff --git a/pkg/minikube/kubeconfig/kubeconfig_test.go b/pkg/minikube/kubeconfig/kubeconfig_test.go index c41f167f03..8a0df17120 100644 --- a/pkg/minikube/kubeconfig/kubeconfig_test.go +++ b/pkg/minikube/kubeconfig/kubeconfig_test.go @@ -167,6 +167,13 @@ func TestUpdate(t *testing.T) { if err != nil { t.Fatalf("Error making temp directory %v", err) } + defer func() { //clean up tempdir + err := os.RemoveAll(tmpDir) + if err != nil { + t.Errorf("failed to clean up 
temp folder %q", tmpDir) + } + }() + test.cfg.SetPath(filepath.Join(tmpDir, "kubeconfig")) if len(test.existingCfg) != 0 { if err := ioutil.WriteFile(test.cfg.filePath(), test.existingCfg, 0600); err != nil { diff --git a/pkg/minikube/localpath/localpath_test.go b/pkg/minikube/localpath/localpath_test.go index d8e6915a43..00f52d09da 100644 --- a/pkg/minikube/localpath/localpath_test.go +++ b/pkg/minikube/localpath/localpath_test.go @@ -33,7 +33,12 @@ func TestReplaceWinDriveLetterToVolumeName(t *testing.T) { if err != nil { t.Fatalf("Error make tmp directory: %v", err) } - defer os.RemoveAll(path) + defer func() { //clean up tempdir + err := os.RemoveAll(path) + if err != nil { + t.Errorf("failed to clean up temp folder %q", path) + } + }() if runtime.GOOS != "windows" { // Replace to fake func. diff --git a/pkg/minikube/machine/cache_binaries_test.go b/pkg/minikube/machine/cache_binaries_test.go index 500c0ea74d..f83fbfc832 100644 --- a/pkg/minikube/machine/cache_binaries_test.go +++ b/pkg/minikube/machine/cache_binaries_test.go @@ -89,7 +89,13 @@ func TestCacheBinariesForBootstrapper(t *testing.T) { if err != nil { t.Fatalf("error during creating tmp dir: %v", err) } - defer os.RemoveAll(minikubeHome) + + defer func() { //clean up tempdir + err := os.RemoveAll(minikubeHome) + if err != nil { + t.Errorf("failed to clean up temp folder %q", minikubeHome) + } + }() var tc = []struct { version, clusterBootstrapper string diff --git a/pkg/minikube/machine/cache_images.go b/pkg/minikube/machine/cache_images.go index 93f735467c..94702cbc07 100644 --- a/pkg/minikube/machine/cache_images.go +++ b/pkg/minikube/machine/cache_images.go @@ -65,11 +65,10 @@ func CacheImagesForBootstrapper(imageRepository string, version string, clusterB // LoadImages loads previously cached images into the container runtime func LoadImages(cc *config.ClusterConfig, runner command.Runner, images []string, cacheDir string) error { // Skip loading images if images already exist - if 
cruntime.DockerImagesPreloaded(runner, images) { + if cruntime.ImagesPreloaded(cc.KubernetesConfig.ContainerRuntime, runner, images) { glog.Infof("Images are preloaded, skipping loading") return nil } - glog.Infof("LoadImages start: %s", images) start := time.Now() diff --git a/pkg/minikube/machine/client_test.go b/pkg/minikube/machine/client_test.go index 36465c5b08..b2cafc9428 100644 --- a/pkg/minikube/machine/client_test.go +++ b/pkg/minikube/machine/client_test.go @@ -125,7 +125,12 @@ func makeTempDir() string { func TestRunNotDriver(t *testing.T) { tempDir := makeTempDir() - defer os.RemoveAll(tempDir) + defer func() { //clean up tempdir + err := os.RemoveAll(tempDir) + if err != nil { + t.Errorf("failed to clean up temp folder %q", tempDir) + } + }() StartDriver() if !localbinary.CurrentBinaryIsDockerMachine { t.Fatal("CurrentBinaryIsDockerMachine not set. This will break driver initialization.") diff --git a/pkg/minikube/machine/delete.go b/pkg/minikube/machine/delete.go index 2132d16737..8fc568e43a 100644 --- a/pkg/minikube/machine/delete.go +++ b/pkg/minikube/machine/delete.go @@ -49,6 +49,9 @@ func deleteOrphanedKIC(ociBin string, name string) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() + if err := oci.ShutDown(ociBin, name); err != nil { + glog.Infof("couldn't shut down %s (might be okay): %v ", name, err) + } cmd := exec.CommandContext(ctx, ociBin, "rm", "-f", "-v", name) err = cmd.Run() if err == nil { @@ -77,8 +80,8 @@ func DeleteHost(api libmachine.API, machineName string) error { return mcnerror.ErrHostDoesNotExist{Name: machineName} } - // Hyper-V requires special care to avoid ACPI and file locking issues - if host.Driver.DriverName() == driver.HyperV { + // some drivers need manual shut down before delete to avoid getting stuck. 
+ if driver.NeedsShutdown(host.Driver.DriverName()) { if err := StopHost(api, machineName); err != nil { glog.Warningf("stop host: %v", err) } diff --git a/pkg/minikube/machine/filesync_test.go b/pkg/minikube/machine/filesync_test.go index 143c3e9ab7..a69be410f0 100644 --- a/pkg/minikube/machine/filesync_test.go +++ b/pkg/minikube/machine/filesync_test.go @@ -108,6 +108,12 @@ func TestAssetsFromDir(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { testDir, err := setupTestDir() + defer func() { //clean up tempdir + err := os.RemoveAll(testDir) + if err != nil { + t.Errorf("failed to clean up temp folder %q", testDir) + } + }() if err != nil { t.Errorf("got unexpected error creating test dir: %v", err) return diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 8aa74723e1..0fbf3183d0 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -106,8 +106,9 @@ func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node if serr != nil || s == state.Stopped || s == state.None { // If virtual machine does not exist due to user interrupt cancel(i.e. Ctrl + C), recreate virtual machine me, err := machineExists(h.Driver.DriverName(), s, serr) - glog.Infof("exists: %v err=%v", me, err) - glog.Infof("%q vs %q", err, constants.ErrMachineMissing) + if err != nil { + glog.Infof("machineExists: %t. 
err=%v", me, err) + } if !me || err == constants.ErrMachineMissing { out.T(out.Shrug, `{{.driver_name}} "{{.cluster}}" {{.machine_type}} is missing, will recreate.`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go index 6db7fb25c6..73adf85b9c 100644 --- a/pkg/minikube/machine/start.go +++ b/pkg/minikube/machine/start.go @@ -102,7 +102,7 @@ func createHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*h glog.Infof("createHost starting for %q (driver=%q)", n.Name, cfg.Driver) start := time.Now() defer func() { - glog.Infof("createHost completed in %s", time.Since(start)) + glog.Infof("duration metric: createHost completed in %s", time.Since(start)) }() if cfg.Driver == driver.VMwareFusion && viper.GetBool(config.ShowDriverDeprecationNotification) { @@ -136,11 +136,11 @@ func createHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*h cstart := time.Now() glog.Infof("libmachine.API.Create for %q (driver=%q)", cfg.Name, cfg.Driver) - // Allow two minutes to create host before failing fast - if err := timedCreateHost(h, api, 2*time.Minute); err != nil { + + if err := timedCreateHost(h, api, 4*time.Minute); err != nil { return nil, errors.Wrap(err, "creating host") } - glog.Infof("libmachine.API.Create for %q took %s", cfg.Name, time.Since(cstart)) + glog.Infof("duration metric: libmachine.API.Create for %q took %s", cfg.Name, time.Since(cstart)) if err := postStartSetup(h, cfg); err != nil { return h, errors.Wrap(err, "post-start") @@ -206,7 +206,7 @@ func postStartSetup(h *host.Host, mc config.ClusterConfig) error { if driver.BareMetal(mc.Driver) { showLocalOsRelease() } - if driver.IsVM(mc.Driver) { + if driver.IsVM(mc.Driver) || driver.IsKIC(mc.Driver) { logRemoteOsRelease(r) } return syncLocalAssets(r) diff --git a/pkg/minikube/machine/stop.go b/pkg/minikube/machine/stop.go index fafe09e446..d5cac2b2d3 100644 --- 
a/pkg/minikube/machine/stop.go +++ b/pkg/minikube/machine/stop.go @@ -25,6 +25,7 @@ import ( "github.com/docker/machine/libmachine/state" "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/util/retry" @@ -45,8 +46,7 @@ func StopHost(api libmachine.API, machineName string) error { // stop forcibly stops a host without needing to load func stop(h *host.Host) error { start := time.Now() - if h.DriverName == driver.HyperV { - glog.Infof("As there are issues with stopping Hyper-V VMs using API, trying to shut down using SSH") + if driver.NeedsShutdown(h.DriverName) { if err := trySSHPowerOff(h); err != nil { return errors.Wrap(err, "ssh power off") } @@ -61,7 +61,7 @@ func stop(h *host.Host) error { } return &retry.RetriableError{Err: errors.Wrap(err, "stop")} } - glog.Infof("stop complete within %s", time.Since(start)) + glog.Infof("duration metric: stop complete within %s", time.Since(start)) return nil } @@ -78,8 +78,14 @@ func trySSHPowerOff(h *host.Host) error { } out.T(out.Shutdown, `Powering off "{{.profile_name}}" via SSH ...`, out.V{"profile_name": h.Name}) - out, err := h.RunSSHCommand("sudo poweroff") - // poweroff always results in an error, since the host disconnects. - glog.Infof("poweroff result: out=%s, err=%v", out, err) + // differnet for kic because RunSSHCommand is not implemented by kic + if driver.IsKIC(h.DriverName) { + err := oci.ShutDown(h.DriverName, h.Name) + glog.Infof("shutdown container: err=%v", err) + } else { + out, err := h.RunSSHCommand("sudo poweroff") + // poweroff always results in an error, since the host disconnects. 
+ glog.Infof("poweroff result: out=%s, err=%v", out, err) + } return nil } diff --git a/pkg/minikube/node/cache.go b/pkg/minikube/node/cache.go index ffbde22e84..c24fd0a9d9 100644 --- a/pkg/minikube/node/cache.go +++ b/pkg/minikube/node/cache.go @@ -43,11 +43,12 @@ const ( // BeginCacheKubernetesImages caches images required for kubernetes version in the background func beginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVersion string, cRuntime string) { - if download.PreloadExists(k8sVersion, cRuntime) { + // TODO: remove imageRepository check once #7695 is fixed + if imageRepository == "" && download.PreloadExists(k8sVersion, cRuntime) { glog.Info("Caching tarball of preloaded images") err := download.Preload(k8sVersion, cRuntime) if err == nil { - glog.Infof("Finished downloading the preloaded tar for %s on %s", k8sVersion, cRuntime) + glog.Infof("Finished verifying existence of preloaded tar for %s on %s", k8sVersion, cRuntime) return // don't cache individual images if preload is successful. } glog.Warningf("Error downloading preloaded artifacts will continue without preload: %v", err) @@ -100,12 +101,14 @@ func doCacheBinaries(k8sVersion string) error { // BeginDownloadKicArtifacts downloads the kic image + preload tarball, returns true if preload is available func beginDownloadKicArtifacts(g *errgroup.Group) { - out.T(out.Pulling, "Pulling base image ...") glog.Info("Beginning downloading kic artifacts") - g.Go(func() error { - glog.Infof("Downloading %s to local daemon", kic.BaseImage) - return image.WriteImageToDaemon(kic.BaseImage) - }) + if !image.ExistsImageInDaemon(kic.BaseImage) { + out.T(out.Pulling, "Pulling base image ...") + g.Go(func() error { + glog.Infof("Downloading %s to local daemon", kic.BaseImage) + return image.WriteImageToDaemon(kic.BaseImage) + }) + } } // WaitDownloadKicArtifacts blocks until the required artifacts for KIC are downloaded. 
diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 8949f3a602..5f2aecb0d9 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -37,7 +37,6 @@ import ( cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/addons" "k8s.io/minikube/pkg/minikube/bootstrapper" - "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify" "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/command" @@ -145,8 +144,8 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { prepareNone() } - // Skip pre-existing, because we already waited for health - if kverify.ShouldWait(starter.Cfg.VerifyComponents) && !starter.PreExists { + // TODO: existing cluster should wait for health #7597 + if !starter.PreExists { if err := bs.WaitForNode(*starter.Cfg, *starter.Node, viper.GetDuration(waitTimeout)); err != nil { return nil, errors.Wrap(err, "Wait failed") } @@ -156,9 +155,23 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { return nil, errors.Wrap(err, "Updating node") } - cpBs, err := cluster.Bootstrapper(starter.MachineAPI, viper.GetString(cmdcfg.Bootstrapper), *starter.Cfg, starter.Runner) + // Make sure to use the command runner for the control plane to generate the join token + cp, err := config.PrimaryControlPlane(starter.Cfg) if err != nil { - return nil, errors.Wrap(err, "Getting bootstrapper") + return nil, errors.Wrap(err, "getting primary control plane") + } + h, err := machine.LoadHost(starter.MachineAPI, driver.MachineName(*starter.Cfg, cp)) + if err != nil { + return nil, errors.Wrap(err, "getting control plane host") + } + cpr, err := machine.CommandRunner(h) + if err != nil { + return nil, errors.Wrap(err, "getting control plane command runner") + } + + cpBs, err := cluster.Bootstrapper(starter.MachineAPI, viper.GetString(cmdcfg.Bootstrapper), *starter.Cfg, cpr) + if err != nil { + return nil, 
errors.Wrap(err, "getting control plane bootstrapper") } joinCmd, err := cpBs.GenerateToken(*starter.Cfg) @@ -261,24 +274,16 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node, out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value}) } // Loads cached images, generates config files, download binaries - // update cluster and set up certs in parallel - var parallel sync.WaitGroup - parallel.Add(2) - go func() { - if err := bs.UpdateCluster(cfg); err != nil { - exit.WithError("Failed to update cluster", err) - } - parallel.Done() - }() + // update cluster and set up certs - go func() { - if err := bs.SetupCerts(cfg.KubernetesConfig, n); err != nil { - exit.WithError("Failed to setup certs", err) - } - parallel.Done() - }() + if err := bs.UpdateCluster(cfg); err != nil { + exit.WithError("Failed to update cluster", err) + } + + if err := bs.SetupCerts(cfg.KubernetesConfig, n); err != nil { + exit.WithError("Failed to setup certs", err) + } - parallel.Wait() return bs } @@ -398,7 +403,7 @@ func validateNetwork(h *host.Host, r command.Runner, imageRepository string) (st ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY k = strings.ToUpper(k) // for http_proxy & https_proxy if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce { - out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"}) + out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). 
Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/handbook/vpn_and_proxy/"}) warnedOnce = true } } diff --git a/pkg/minikube/out/style.go b/pkg/minikube/out/style.go index 1cd400d2ae..349b8c4e3f 100644 --- a/pkg/minikube/out/style.go +++ b/pkg/minikube/out/style.go @@ -108,6 +108,7 @@ var styles = map[StyleEnum]style{ Enabling: {Prefix: "🔌 "}, Shutdown: {Prefix: "🛑 "}, Pulling: {Prefix: "🚜 "}, + HealthCheck: {Prefix: "🔎 "}, Verifying: {Prefix: "🤔 "}, VerifyingNoLine: {Prefix: "🤔 ", OmitNewline: true}, Kubectl: {Prefix: "💗 "}, diff --git a/pkg/minikube/out/style_enum.go b/pkg/minikube/out/style_enum.go index 1437b26823..fd28a00f3f 100644 --- a/pkg/minikube/out/style_enum.go +++ b/pkg/minikube/out/style_enum.go @@ -73,6 +73,7 @@ const ( Enabling Shutdown Pulling + HealthCheck Verifying VerifyingNoLine Kubectl diff --git a/pkg/minikube/perf/binary.go b/pkg/minikube/perf/binary.go new file mode 100644 index 0000000000..5fe6d7f6b9 --- /dev/null +++ b/pkg/minikube/perf/binary.go @@ -0,0 +1,108 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package perf + +import ( + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube/constants" +) + +type Binary struct { + path string + pr int +} + +const ( + prPrefix = "pr://" +) + +// NewBinary returns a new binary type +func NewBinary(b string) (*Binary, error) { + // If it doesn't have the prefix, assume a path + if !strings.HasPrefix(b, prPrefix) { + return &Binary{ + path: b, + }, nil + } + return newBinaryFromPR(b) +} + +// Name returns the name of the binary +func (b *Binary) Name() string { + if b.pr != 0 { + return fmt.Sprintf("Minikube (PR %d)", b.pr) + } + return filepath.Base(b.path) +} + +// newBinaryFromPR downloads the minikube binary built for the pr by Jenkins from GCS +func newBinaryFromPR(pr string) (*Binary, error) { + pr = strings.TrimPrefix(pr, prPrefix) + // try to convert to int + i, err := strconv.Atoi(pr) + if err != nil { + return nil, errors.Wrapf(err, "converting %s to an integer", pr) + } + + b := &Binary{ + path: localMinikubePath(i), + pr: i, + } + + if err := downloadBinary(remoteMinikubeURL(i), b.path); err != nil { + return nil, errors.Wrapf(err, "downloading minikube") + } + + return b, nil +} + +func remoteMinikubeURL(pr int) string { + return fmt.Sprintf("https://storage.googleapis.com/minikube-builds/%d/minikube-linux-amd64", pr) +} + +func localMinikubePath(pr int) string { + return fmt.Sprintf("%s/minikube-binaries/%d/minikube", constants.DefaultMinipath, pr) +} + +func downloadBinary(url, path string) error { + resp, err := http.Get(url) + if err != nil { + return err + } + defer resp.Body.Close() + + if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { + return err + } + + f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0777) + if err != nil { + return err + } + defer f.Close() + + _, err = io.Copy(f, resp.Body) + return err +} diff --git a/pkg/minikube/perf/start.go b/pkg/minikube/perf/start.go index a942863f8c..7b7bd28bf3 
100644 --- a/pkg/minikube/perf/start.go +++ b/pkg/minikube/perf/start.go @@ -35,21 +35,26 @@ const ( var ( // For testing - collectTimeMinikubeStart = timeMinikubeStart + collectTimeMinikubeStart = collectTimes ) // CompareMinikubeStart compares the time to run `minikube start` between two minikube binaries -func CompareMinikubeStart(ctx context.Context, out io.Writer, binaries []string) error { - durations, err := collectTimes(ctx, binaries) +func CompareMinikubeStart(ctx context.Context, out io.Writer, binaries []*Binary) error { + durations, err := collectTimeMinikubeStart(ctx, binaries) if err != nil { return err } - fmt.Fprintf(out, "Old binary: %v\nNew binary: %v\nAverage Old: %f\nAverage New: %f\n", durations[0], durations[1], average(durations[0]), average(durations[1])) + for i, d := range durations { + fmt.Fprintf(out, "Results for %s:\n", binaries[i].Name()) + fmt.Fprintf(out, "Times: %v\n", d) + fmt.Fprintf(out, "Average Time: %f\n\n", average(d)) + } + return nil } -func collectTimes(ctx context.Context, binaries []string) ([][]float64, error) { +func collectTimes(ctx context.Context, binaries []*Binary) ([][]float64, error) { durations := make([][]float64, len(binaries)) for i := range durations { durations[i] = make([]float64, runs) @@ -58,9 +63,9 @@ func collectTimes(ctx context.Context, binaries []string) ([][]float64, error) { for r := 0; r < runs; r++ { log.Printf("Executing run %d...", r) for index, binary := range binaries { - duration, err := collectTimeMinikubeStart(ctx, binary) + duration, err := timeMinikubeStart(ctx, binary) if err != nil { - return nil, errors.Wrapf(err, "timing run %d with %s", r, binary) + return nil, errors.Wrapf(err, "timing run %d with %s", r, binary.Name()) } durations[index][r] = duration } @@ -79,12 +84,12 @@ func average(nums []float64) float64 { // timeMinikubeStart returns the time it takes to execute `minikube start` // It deletes the VM after `minikube start`. 
-func timeMinikubeStart(ctx context.Context, binary string) (float64, error) { - startCmd := exec.CommandContext(ctx, binary, "start") +func timeMinikubeStart(ctx context.Context, binary *Binary) (float64, error) { + startCmd := exec.CommandContext(ctx, binary.path, "start") startCmd.Stdout = os.Stdout startCmd.Stderr = os.Stderr - deleteCmd := exec.CommandContext(ctx, binary, "delete") + deleteCmd := exec.CommandContext(ctx, binary.path, "delete") defer func() { if err := deleteCmd.Run(); err != nil { log.Printf("error deleting minikube: %v", err) diff --git a/pkg/minikube/perf/start_test.go b/pkg/minikube/perf/start_test.go index 539d57500d..2d802f7d9c 100644 --- a/pkg/minikube/perf/start_test.go +++ b/pkg/minikube/perf/start_test.go @@ -19,86 +19,64 @@ package perf import ( "bytes" "context" - "reflect" "testing" + + "github.com/google/go-cmp/cmp" ) -func mockCollectTimeMinikubeStart(durations []float64) func(ctx context.Context, binary string) (float64, error) { - index := 0 - return func(context.Context, string) (float64, error) { - duration := durations[index] - index++ - return duration, nil +func mockCollectTimes(times [][]float64) func(ctx context.Context, binaries []*Binary) ([][]float64, error) { + return func(ctx context.Context, binaries []*Binary) ([][]float64, error) { + return times, nil } } func TestCompareMinikubeStartOutput(t *testing.T) { + binaries := []*Binary{ + { + path: "minikube1", + }, { + path: "minikube2", + }, + } tests := []struct { description string - durations []float64 + times [][]float64 expected string }{ { description: "standard run", - durations: []float64{4.5, 6}, - expected: "Old binary: [4.5]\nNew binary: [6]\nAverage Old: 4.500000\nAverage New: 6.000000\n", + times: [][]float64{{4.5, 6}, {1, 2}}, + expected: `Results for minikube1: +Times: [4.5 6] +Average Time: 5.250000 + +Results for minikube2: +Times: [1 2] +Average Time: 1.500000 + +`, }, } for _, test := range tests { t.Run(test.description, func(t *testing.T) { - 
originalCollectTimes := collectTimeMinikubeStart - collectTimeMinikubeStart = mockCollectTimeMinikubeStart(test.durations) + originalCollectTimes := collectTimes + collectTimeMinikubeStart = mockCollectTimes(test.times) defer func() { collectTimeMinikubeStart = originalCollectTimes }() buf := bytes.NewBuffer([]byte{}) - err := CompareMinikubeStart(context.Background(), buf, []string{"", ""}) + err := CompareMinikubeStart(context.Background(), buf, binaries) if err != nil { t.Fatalf("error comparing minikube start: %v", err) } actual := buf.String() - if test.expected != actual { - t.Fatalf("actual output does not match expected output\nActual: %v\nExpected: %v", actual, test.expected) + if diff := cmp.Diff(test.expected, actual); diff != "" { + t.Errorf("machines mismatch (-want +got):\n%s", diff) } }) } } - -func TestCollectTimes(t *testing.T) { - tests := []struct { - description string - durations []float64 - expected [][]float64 - }{ - { - description: "test collect time", - durations: []float64{1, 2}, - expected: [][]float64{ - {1}, - {2}, - }, - }, - } - - for _, test := range tests { - t.Run(test.description, func(t *testing.T) { - originalCollectTimes := collectTimeMinikubeStart - collectTimeMinikubeStart = mockCollectTimeMinikubeStart(test.durations) - defer func() { collectTimeMinikubeStart = originalCollectTimes }() - - actual, err := collectTimes(context.Background(), []string{"", ""}) - if err != nil { - t.Fatalf("error collecting times: %v", err) - } - - if !reflect.DeepEqual(actual, test.expected) { - t.Fatalf("actual output does not match expected output\nActual: %v\nExpected: %v", actual, test.expected) - } - }) - } -} - func TestAverage(t *testing.T) { tests := []struct { description string diff --git a/pkg/minikube/problem/err_map.go b/pkg/minikube/problem/err_map.go index da6f8fee95..02ba0dd04e 100644 --- a/pkg/minikube/problem/err_map.go +++ b/pkg/minikube/problem/err_map.go @@ -367,8 +367,8 @@ var vmProblems = map[string]match{ } // proxyDoc 
is the URL to proxy documentation -const proxyDoc = "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/" -const vpnDoc = "https://minikube.sigs.k8s.io/docs/reference/networking/vpn/" +const proxyDoc = "https://minikube.sigs.k8s.io/docs/handbook/vpn_and_proxy/" +const vpnDoc = "https://minikube.sigs.k8s.io/docs/handbook/vpn_and_proxy/" // netProblems are network related problems. var netProblems = map[string]match{ diff --git a/pkg/minikube/proxy/proxy.go b/pkg/minikube/proxy/proxy.go index a65d29098c..c6a539a8a3 100644 --- a/pkg/minikube/proxy/proxy.go +++ b/pkg/minikube/proxy/proxy.go @@ -40,19 +40,28 @@ func isInBlock(ip string, block string) (bool, error) { return false, fmt.Errorf("CIDR is nil") } + if ip == block { + return true, nil + } + i := net.ParseIP(ip) if i == nil { return false, fmt.Errorf("parsed IP is nil") } - _, b, err := net.ParseCIDR(block) - if err != nil { - return false, errors.Wrapf(err, "Error Parsing block %s", b) + + // check the block if it's CIDR + if strings.Contains(block, "/") { + _, b, err := net.ParseCIDR(block) + if err != nil { + return false, errors.Wrapf(err, "Error Parsing block %s", b) + } + + if b.Contains(i) { + return true, nil + } } - if b.Contains(i) { - return true, nil - } - return false, errors.Wrapf(err, "Error ip not in block") + return false, errors.New("Error ip not in block") } // ExcludeIP takes ip or CIDR as string and excludes it from the http(s)_proxy @@ -101,7 +110,11 @@ func checkEnv(ip string, env string) bool { // Checks if included in IP ranges, i.e., 192.168.39.13/24 noProxyBlocks := strings.Split(v, ",") for _, b := range noProxyBlocks { - if yes, _ := isInBlock(ip, b); yes { + yes, err := isInBlock(ip, b) + if err != nil { + glog.Warningf("fail to check proxy env: %v", err) + } + if yes { return true } } diff --git a/pkg/minikube/proxy/proxy_test.go b/pkg/minikube/proxy/proxy_test.go index 4475905c50..8c87124325 100644 --- a/pkg/minikube/proxy/proxy_test.go +++ 
b/pkg/minikube/proxy/proxy_test.go @@ -53,8 +53,10 @@ func TestIsInBlock(t *testing.T) { wanntAErr bool }{ {"", "192.168.0.1/32", false, true}, + {"192.168.0.1", "", false, true}, + {"192.168.0.1", "192.168.0.1", true, false}, {"192.168.0.1", "192.168.0.1/32", true, false}, - {"192.168.0.2", "192.168.0.1/32", false, false}, + {"192.168.0.2", "192.168.0.1/32", false, true}, {"192.168.0.1", "192.168.0.1/18", true, false}, {"abcd", "192.168.0.1/18", false, true}, {"192.168.0.1", "foo", false, true}, @@ -122,6 +124,7 @@ func TestCheckEnv(t *testing.T) { {"192.168.0.13", "NO_PROXY", false, ""}, {"192.168.0.13", "NO_PROXY", false, ","}, {"192.168.0.13", "NO_PROXY", true, "192.168.0.13"}, + {"192.168.0.13", "NO_PROXY", false, "192.168.0.14"}, {"192.168.0.13", "NO_PROXY", true, ",192.168.0.13"}, {"192.168.0.13", "NO_PROXY", true, "10.10.0.13,192.168.0.13"}, {"192.168.0.13", "NO_PROXY", true, "192.168.0.13/22"}, diff --git a/pkg/minikube/registry/drvs/docker/docker.go b/pkg/minikube/registry/drvs/docker/docker.go index db82619eb8..88a190897a 100644 --- a/pkg/minikube/registry/drvs/docker/docker.go +++ b/pkg/minikube/registry/drvs/docker/docker.go @@ -97,6 +97,10 @@ func status() registry.State { stderr := strings.TrimSpace(string(exitErr.Stderr)) newErr := fmt.Errorf(`%q %v: %s`, strings.Join(cmd.Args, " "), exitErr, stderr) + if strings.Contains(stderr, "permission denied") && runtime.GOOS == "linux" { + return registry.State{Error: newErr, Installed: true, Healthy: false, Fix: "Add your user to the 'docker' group: 'sudo usermod -aG docker $USER && newgrp docker'", Doc: "https://docs.docker.com/engine/install/linux-postinstall/"} + } + if strings.Contains(stderr, "Cannot connect") || strings.Contains(stderr, "refused") || strings.Contains(stderr, "Is the docker daemon running") { return registry.State{Error: newErr, Installed: true, Healthy: false, Fix: "Start the Docker service", Doc: docURL} } diff --git a/pkg/minikube/registry/global.go b/pkg/minikube/registry/global.go 
index 3f608ef471..3e92a73bab 100644 --- a/pkg/minikube/registry/global.go +++ b/pkg/minikube/registry/global.go @@ -22,6 +22,7 @@ import ( "sort" "github.com/golang/glog" + "k8s.io/minikube/pkg/minikube/translate" ) const ( @@ -74,7 +75,8 @@ type DriverState struct { func (d DriverState) String() string { if d.Priority == Experimental { - return fmt.Sprintf("%s (experimental)", d.Name) + experimental := translate.T("experimental") + return fmt.Sprintf("%s (%s)", d.Name, experimental) } return d.Name } diff --git a/pkg/minikube/service/service.go b/pkg/minikube/service/service.go index d84cc27f9d..e50d4cc6c6 100644 --- a/pkg/minikube/service/service.go +++ b/pkg/minikube/service/service.go @@ -31,23 +31,17 @@ import ( "github.com/golang/glog" "github.com/olekukonko/tablewriter" "github.com/pkg/errors" - "github.com/spf13/viper" core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/kubernetes" typed_core "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/tools/clientcmd" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/kapi" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" - "k8s.io/minikube/pkg/minikube/proxy" "k8s.io/minikube/pkg/util/retry" ) const ( - defaultK8sClientTimeout = 60 * time.Second // DefaultWait is the default wait time, in seconds DefaultWait = 2 // DefaultInterval is the default interval, in seconds @@ -56,8 +50,7 @@ const ( // K8sClient represents a kubernetes client type K8sClient interface { - GetCoreClient() (typed_core.CoreV1Interface, error) - GetClientset(timeout time.Duration) (*kubernetes.Clientset, error) + GetCoreClient(string) (typed_core.CoreV1Interface, error) } // K8sClientGetter can get a K8sClient @@ -71,39 +64,14 @@ func init() { } // GetCoreClient returns a core client -func (k *K8sClientGetter) GetCoreClient() (typed_core.CoreV1Interface, error) { - client, 
err := k.GetClientset(defaultK8sClientTimeout) +func (k *K8sClientGetter) GetCoreClient(context string) (typed_core.CoreV1Interface, error) { + client, err := kapi.Client(context) if err != nil { - return nil, errors.Wrap(err, "getting clientset") + return nil, errors.Wrap(err, "client") } return client.CoreV1(), nil } -// GetClientset returns a clientset -func (*K8sClientGetter) GetClientset(timeout time.Duration) (*kubernetes.Clientset, error) { - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() - profile := viper.GetString(config.ProfileName) - configOverrides := &clientcmd.ConfigOverrides{ - Context: clientcmdapi.Context{ - Cluster: profile, - AuthInfo: profile, - }, - } - kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) - clientConfig, err := kubeConfig.ClientConfig() - if err != nil { - return nil, fmt.Errorf("kubeConfig: %v", err) - } - clientConfig.Timeout = timeout - clientConfig = proxy.UpdateTransport(clientConfig) - client, err := kubernetes.NewForConfig(clientConfig) - if err != nil { - return nil, errors.Wrap(err, "client from config") - } - - return client, nil -} - // SvcURL represents a service URL. Each item in the URLs field combines the service URL with one of the configured // node ports. The PortNames field contains the configured names of the ports in the URLs field (sorted correspondingly - // first item in PortNames belongs to the first item in URLs). @@ -119,8 +87,8 @@ type URLs []SvcURL // GetServiceURLs returns a SvcURL object for every service in a particular namespace. 
// Accepts a template for formatting -func GetServiceURLs(api libmachine.API, namespace string, t *template.Template) (URLs, error) { - host, err := machine.LoadHost(api, viper.GetString(config.ProfileName)) +func GetServiceURLs(api libmachine.API, cname string, namespace string, t *template.Template) (URLs, error) { + host, err := machine.LoadHost(api, cname) if err != nil { return nil, err } @@ -130,7 +98,7 @@ func GetServiceURLs(api libmachine.API, namespace string, t *template.Template) return nil, err } - client, err := K8s.GetCoreClient() + client, err := K8s.GetCoreClient(cname) if err != nil { return nil, err } @@ -155,8 +123,8 @@ func GetServiceURLs(api libmachine.API, namespace string, t *template.Template) } // GetServiceURLsForService returns a SvcUrl object for a service in a namespace. Supports optional formatting. -func GetServiceURLsForService(api libmachine.API, namespace, service string, t *template.Template) (SvcURL, error) { - host, err := machine.LoadHost(api, viper.GetString(config.ProfileName)) +func GetServiceURLsForService(api libmachine.API, cname string, namespace, service string, t *template.Template) (SvcURL, error) { + host, err := machine.LoadHost(api, cname) if err != nil { return SvcURL{}, errors.Wrap(err, "Error checking if api exist and loading it") } @@ -166,7 +134,7 @@ func GetServiceURLsForService(api libmachine.API, namespace, service string, t * return SvcURL{}, errors.Wrap(err, "Error getting ip from host") } - client, err := K8s.GetCoreClient() + client, err := K8s.GetCoreClient(cname) if err != nil { return SvcURL{}, err } @@ -226,8 +194,8 @@ func printURLsForService(c typed_core.CoreV1Interface, ip, service, namespace st } // CheckService checks if a service is listening on a port. 
-func CheckService(namespace string, service string) error { - client, err := K8s.GetCoreClient() +func CheckService(cname string, namespace string, service string) error { + client, err := K8s.GetCoreClient(cname) if err != nil { return errors.Wrap(err, "Error getting kubernetes client") } @@ -283,7 +251,7 @@ func (t SVCNotFoundError) Error() string { } // WaitForService waits for a service, and return the urls when available -func WaitForService(api libmachine.API, namespace string, service string, urlTemplate *template.Template, urlMode bool, https bool, +func WaitForService(api libmachine.API, cname string, namespace string, service string, urlTemplate *template.Template, urlMode bool, https bool, wait int, interval int) ([]string, error) { var urlList []string // Convert "Amount of time to wait" and "interval of each check" to attempts @@ -291,18 +259,18 @@ func WaitForService(api libmachine.API, namespace string, service string, urlTem interval = 1 } - err := CheckService(namespace, service) + err := CheckService(cname, namespace, service) if err != nil { return nil, &SVCNotFoundError{err} } - chkSVC := func() error { return CheckService(namespace, service) } + chkSVC := func() error { return CheckService(cname, namespace, service) } if err := retry.Expo(chkSVC, time.Duration(interval)*time.Second, time.Duration(wait)*time.Second); err != nil { return nil, &SVCNotFoundError{err} } - serviceURL, err := GetServiceURLsForService(api, namespace, service, urlTemplate) + serviceURL, err := GetServiceURLsForService(api, cname, namespace, service, urlTemplate) if err != nil { return urlList, errors.Wrap(err, "Check that minikube is running and that you have specified the correct namespace") } @@ -330,8 +298,8 @@ func WaitForService(api libmachine.API, namespace string, service string, urlTem } // GetServiceListByLabel returns a ServiceList by label -func GetServiceListByLabel(namespace string, key string, value string) (*core.ServiceList, error) { - client, err := 
K8s.GetCoreClient() +func GetServiceListByLabel(cname string, namespace string, key string, value string) (*core.ServiceList, error) { + client, err := K8s.GetCoreClient(cname) if err != nil { return &core.ServiceList{}, &retry.RetriableError{Err: err} } @@ -349,8 +317,8 @@ func getServiceListFromServicesByLabel(services typed_core.ServiceInterface, key } // CreateSecret creates or modifies secrets -func CreateSecret(namespace, name string, dataValues map[string]string, labels map[string]string) error { - client, err := K8s.GetCoreClient() +func CreateSecret(cname string, namespace, name string, dataValues map[string]string, labels map[string]string) error { + client, err := K8s.GetCoreClient(cname) if err != nil { return &retry.RetriableError{Err: err} } @@ -363,7 +331,7 @@ func CreateSecret(namespace, name string, dataValues map[string]string, labels m // Delete existing secret if len(secret.Name) > 0 { - err = DeleteSecret(namespace, name) + err = DeleteSecret(cname, namespace, name) if err != nil { return &retry.RetriableError{Err: err} } @@ -394,8 +362,8 @@ func CreateSecret(namespace, name string, dataValues map[string]string, labels m } // DeleteSecret deletes a secret from a namespace -func DeleteSecret(namespace, name string) error { - client, err := K8s.GetCoreClient() +func DeleteSecret(cname string, namespace, name string) error { + client, err := K8s.GetCoreClient(cname) if err != nil { return &retry.RetriableError{Err: err} } diff --git a/pkg/minikube/service/service_test.go b/pkg/minikube/service/service_test.go index 449c55343b..bb37c8a3ae 100644 --- a/pkg/minikube/service/service_test.go +++ b/pkg/minikube/service/service_test.go @@ -26,8 +26,6 @@ import ( "testing" "text/template" - "time" - "github.com/docker/machine/libmachine" "github.com/docker/machine/libmachine/host" "github.com/pkg/errors" @@ -35,7 +33,6 @@ import ( core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - 
"k8s.io/client-go/kubernetes" typed_core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/kubernetes/typed/core/v1/fake" testing_fake "k8s.io/client-go/testing" @@ -55,7 +52,7 @@ type MockClientGetter struct { // Force GetCoreClient to fail var getCoreClientFail bool -func (m *MockClientGetter) GetCoreClient() (typed_core.CoreV1Interface, error) { +func (m *MockClientGetter) GetCoreClient(string) (typed_core.CoreV1Interface, error) { if getCoreClientFail { return nil, fmt.Errorf("test Error - Mocked Get") } @@ -65,10 +62,6 @@ func (m *MockClientGetter) GetCoreClient() (typed_core.CoreV1Interface, error) { secretsMap: m.secretsMap}, nil } -func (m *MockClientGetter) GetClientset(timeout time.Duration) (*kubernetes.Clientset, error) { - return nil, nil -} - func (m *MockCoreClient) Secrets(ns string) typed_core.SecretInterface { return &fake.FakeSecrets{Fake: &fake.FakeCoreV1{Fake: &testing_fake.Fake{}}} } @@ -476,7 +469,7 @@ func TestGetServiceURLs(t *testing.T) { servicesMap: serviceNamespaces, endpointsMap: endpointNamespaces, } - urls, err := GetServiceURLs(test.api, test.namespace, defaultTemplate) + urls, err := GetServiceURLs(test.api, "minikube", test.namespace, defaultTemplate) if err != nil && !test.err { t.Errorf("Error GetServiceURLs %v", err) } @@ -544,7 +537,7 @@ func TestGetServiceURLsForService(t *testing.T) { servicesMap: serviceNamespaces, endpointsMap: endpointNamespaces, } - svcURL, err := GetServiceURLsForService(test.api, test.namespace, test.service, defaultTemplate) + svcURL, err := GetServiceURLsForService(test.api, "minikube", test.namespace, test.service, defaultTemplate) if err != nil && !test.err { t.Errorf("Error GetServiceURLsForService %v", err) } @@ -626,7 +619,7 @@ users: os.Setenv("KUBECONFIG", mockK8sConfigPath) k8s := K8sClientGetter{} - _, err = k8s.GetCoreClient() + _, err = k8s.GetCoreClient("minikube") if err != nil && !test.err { t.Fatalf("GetCoreClient returned unexpected error: %v", err) } @@ -691,7 +684,7 @@ 
func TestGetServiceListByLabel(t *testing.T) { secretsMap: secretsNamespaces, } getCoreClientFail = test.failedGetClient - svcs, err := GetServiceListByLabel(test.ns, test.name, test.label) + svcs, err := GetServiceListByLabel("minikube", test.ns, test.name, test.label) if err != nil && !test.err { t.Fatalf("Test %v got unexpected error: %v", test.description, err) } @@ -741,7 +734,7 @@ func TestCheckService(t *testing.T) { secretsMap: secretsNamespaces, } getCoreClientFail = test.failedGetClient - err := CheckService(test.ns, test.name) + err := CheckService("minikube", test.ns, test.name) if err == nil && test.err { t.Fatalf("Test %v expected error but got nil", test.description) } @@ -780,7 +773,7 @@ func TestDeleteSecret(t *testing.T) { secretsMap: secretsNamespaces, } getCoreClientFail = test.failedGetClient - err := DeleteSecret(test.ns, test.name) + err := DeleteSecret("minikube", test.ns, test.name) if err == nil && test.err { t.Fatalf("Test %v expected error but got nil", test.description) } @@ -819,7 +812,7 @@ func TestCreateSecret(t *testing.T) { secretsMap: secretsNamespaces, } getCoreClientFail = test.failedGetClient - err := CreateSecret(test.ns, test.name, map[string]string{"ns": "secret"}, map[string]string{"ns": "baz"}) + err := CreateSecret("minikube", test.ns, test.name, map[string]string{"ns": "secret"}, map[string]string{"ns": "baz"}) if err == nil && test.err { t.Fatalf("Test %v expected error but got nil", test.description) } @@ -921,7 +914,7 @@ func TestWaitAndMaybeOpenService(t *testing.T) { } var urlList []string - urlList, err := WaitForService(test.api, test.namespace, test.service, defaultTemplate, test.urlMode, test.https, 1, 0) + urlList, err := WaitForService(test.api, "minikube", test.namespace, test.service, defaultTemplate, test.urlMode, test.https, 1, 0) if test.err && err == nil { t.Fatalf("WaitForService expected to fail for test: %v", test) } @@ -986,7 +979,7 @@ func TestWaitAndMaybeOpenServiceForNotDefaultNamspace(t 
*testing.T) { servicesMap: serviceNamespaceOther, endpointsMap: endpointNamespaces, } - _, err := WaitForService(test.api, test.namespace, test.service, defaultTemplate, test.urlMode, test.https, 1, 0) + _, err := WaitForService(test.api, "minikube", test.namespace, test.service, defaultTemplate, test.urlMode, test.https, 1, 0) if test.err && err == nil { t.Fatalf("WaitForService expected to fail for test: %v", test) } diff --git a/pkg/minikube/storageclass/storageclass.go b/pkg/minikube/storageclass/storageclass.go index fa6bb19a12..8273ceff27 100644 --- a/pkg/minikube/storageclass/storageclass.go +++ b/pkg/minikube/storageclass/storageclass.go @@ -22,9 +22,8 @@ import ( "github.com/pkg/errors" v1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" - "k8s.io/client-go/tools/clientcmd" + "k8s.io/minikube/pkg/kapi" ) func annotateDefaultStorageClass(storage storagev1.StorageV1Interface, class *v1.StorageClass, enable bool) error { @@ -71,25 +70,11 @@ func SetDefaultStorageClass(storage storagev1.StorageV1Interface, name string) e } // GetStoragev1 return storage v1 interface for client -func GetStoragev1() (storagev1.StorageV1Interface, error) { - client, err := getClient() +func GetStoragev1(context string) (storagev1.StorageV1Interface, error) { + client, err := kapi.Client(context) if err != nil { return nil, err } sv1 := client.StorageV1() return sv1, nil } - -func getClient() (*kubernetes.Clientset, error) { - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() - kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) - config, err := kubeConfig.ClientConfig() - if err != nil { - return nil, errors.Wrap(err, "Error creating kubeConfig") - } - client, err := kubernetes.NewForConfig(config) - if err != nil { - return nil, errors.Wrap(err, "Error creating new client from 
kubeConfig.ClientConfig()") - } - return client, nil -} diff --git a/pkg/minikube/storageclass/storageclass_test.go b/pkg/minikube/storageclass/storageclass_test.go index ea954e94f7..d0838f2fd3 100644 --- a/pkg/minikube/storageclass/storageclass_test.go +++ b/pkg/minikube/storageclass/storageclass_test.go @@ -212,45 +212,6 @@ users: - name: minikube ` -func TestGetClient(t *testing.T) { - var tests = []struct { - description string - config string - err bool - }{ - { - description: "ok", - config: mockK8sConfig, - }, - { - description: "no valid config", - config: "this is not valid config", - err: true, - }, - } - configFile, err := ioutil.TempFile("/tmp", "") - if err != nil { - t.Fatalf(err.Error()) - } - defer os.Remove(configFile.Name()) - for _, test := range tests { - t.Run(test.description, func(t *testing.T) { - - if err := setK8SConfig(test.config, configFile.Name()); err != nil { - t.Fatalf(err.Error()) - } - - _, err = getClient() - if err != nil && !test.err { - t.Fatalf("Unexpected err: %v for test: %v", err, test.description) - } - if err == nil && test.err { - t.Fatalf("Expected err for test: %v", test.description) - } - }) - } -} - func TestGetStoragev1(t *testing.T) { var tests = []struct { description string @@ -278,7 +239,8 @@ func TestGetStoragev1(t *testing.T) { t.Fatalf(err.Error()) } - _, err = GetStoragev1() + // context name is hardcoded by mockK8sConfig + _, err = GetStoragev1("minikube") if err != nil && !test.err { t.Fatalf("Unexpected err: %v for test: %v", err, test.description) } diff --git a/pkg/minikube/tunnel/route_darwin.go b/pkg/minikube/tunnel/route_darwin.go index 0e2f5064ff..6adcfb5701 100644 --- a/pkg/minikube/tunnel/route_darwin.go +++ b/pkg/minikube/tunnel/route_darwin.go @@ -22,10 +22,12 @@ import ( "net" "os" "os/exec" + "path/filepath" "regexp" "strings" "github.com/golang/glog" + "github.com/pkg/errors" ) func (router *osRouter) EnsureRouteIsAdded(route *Route) error { @@ -37,7 +39,7 @@ func (router *osRouter) 
EnsureRouteIsAdded(route *Route) error { return nil } if err := writeResolverFile(route); err != nil { - return fmt.Errorf("could not write /etc/resolver/{cluster_domain} file: %s", err) + glog.Errorf("DNS forwarding unavailable: %v", err) } serviceCIDR := route.DestCIDR.String() @@ -178,26 +180,48 @@ func (router *osRouter) Cleanup(route *Route) error { func writeResolverFile(route *Route) error { resolverFile := "/etc/resolver/" + route.ClusterDomain + content := fmt.Sprintf("nameserver %s\nsearch_order 1\n", route.ClusterDNSIP) - // write resolver content into tmpFile, then copy it to /etc/resolver/clusterDomain - tmpFile, err := ioutil.TempFile("", "minikube-tunnel-resolver-") + + glog.Infof("preparing DNS forwarding config in %q:\n%s", resolverFile, content) + + // write resolver content into tf, then copy it to /etc/resolver/clusterDomain + tf, err := ioutil.TempFile("", "minikube-tunnel-resolver-") if err != nil { - return err + return errors.Wrap(err, "tempfile") } - defer os.Remove(tmpFile.Name()) - if _, err = tmpFile.WriteString(content); err != nil { - return err + defer os.Remove(tf.Name()) + + if _, err = tf.WriteString(content); err != nil { + return errors.Wrap(err, "write") } - if err = tmpFile.Close(); err != nil { - return err + + if err = tf.Close(); err != nil { + return errors.Wrap(err, "close") } - cmd := exec.Command("sudo", "mkdir", "-p", "/etc/resolver") - if err := cmd.Run(); err != nil { - return err + + if err = os.Chmod(tf.Name(), 0644); err != nil { + return errors.Wrap(err, "chmod") } - cmd = exec.Command("sudo", "cp", "-f", tmpFile.Name(), resolverFile) - if err := cmd.Run(); err != nil { - return err + + cmd := exec.Command("sudo", "mkdir", "-p", filepath.Dir(resolverFile)) + _, err = cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return fmt.Errorf("%q failed: %v: %q", strings.Join(cmd.Args, " "), exitErr, exitErr.Stderr) + } + return errors.Wrap(err, "mkdir") } + + cmd = exec.Command("sudo", "cp", 
"-fp", tf.Name(), resolverFile) + + _, err = cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return fmt.Errorf("%q failed: %v: %q", strings.Join(cmd.Args, " "), exitErr, exitErr.Stderr) + } + return errors.Wrap(err, "copy") + } + glog.Infof("DNS forwarding now configured in %q", resolverFile) return nil } diff --git a/pkg/provision/buildroot.go b/pkg/provision/buildroot.go index 2f630f9332..50fb6a28ae 100644 --- a/pkg/provision/buildroot.go +++ b/pkg/provision/buildroot.go @@ -177,7 +177,7 @@ func (p *BuildrootProvisioner) Provision(swarmOptions swarm.Options, authOptions return nil } - err := retry.Expo(configAuth, time.Second, 2*time.Minute) + err := retry.Expo(configAuth, 100*time.Microsecond, 2*time.Minute) if err != nil { glog.Infof("Error configuring auth during provisioning %v", err) return err diff --git a/pkg/provision/provision.go b/pkg/provision/provision.go index c3c2287e67..de724ba014 100644 --- a/pkg/provision/provision.go +++ b/pkg/provision/provision.go @@ -82,7 +82,7 @@ func configureAuth(p miniProvisioner) error { glog.Infof("configureAuth start") start := time.Now() defer func() { - glog.Infof("configureAuth took %s", time.Since(start)) + glog.Infof("duration metric: configureAuth took %s", time.Since(start)) }() driver := p.GetDriver() @@ -292,7 +292,7 @@ func updateUnit(p provision.SSHCommander, name string, content string, dst strin if _, err := p.SSHCommand(fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | sudo tee %s.new", path.Dir(dst), content, dst)); err != nil { return err } - if _, err := p.SSHCommand(fmt.Sprintf("sudo diff -u %s %s.new || { sudo mv %s.new %s; sudo systemctl -f daemon-reload && sudo sudo systemctl -f restart %s; }", dst, dst, dst, dst, name)); err != nil { + if _, err := p.SSHCommand(fmt.Sprintf("sudo diff -u %s %s.new || { sudo mv %s.new %s; sudo systemctl -f daemon-reload && sudo systemctl -f enable %s && sudo systemctl -f restart %s; }", dst, dst, dst, dst, name, name)); err != nil 
{ return err } return nil diff --git a/pkg/provision/ubuntu.go b/pkg/provision/ubuntu.go index 3fbf006b69..29349cd66c 100644 --- a/pkg/provision/ubuntu.go +++ b/pkg/provision/ubuntu.go @@ -180,7 +180,7 @@ func (p *UbuntuProvisioner) Provision(swarmOptions swarm.Options, authOptions au return nil } - err := retry.Expo(configAuth, time.Second, 2*time.Minute) + err := retry.Expo(configAuth, 100*time.Microsecond, 2*time.Minute) if err != nil { glog.Infof("Error configuring auth during provisioning %v", err) diff --git a/pkg/util/crypto_test.go b/pkg/util/crypto_test.go index bbce6bcb6f..aa19852591 100644 --- a/pkg/util/crypto_test.go +++ b/pkg/util/crypto_test.go @@ -30,10 +30,15 @@ import ( func TestGenerateCACert(t *testing.T) { tmpDir, err := ioutil.TempDir("", "") + defer func() { //clean up tempdir + err := os.RemoveAll(tmpDir) + if err != nil { + t.Errorf("failed to clean up temp folder %q", tmpDir) + } + }() if err != nil { t.Fatalf("Error generating tmpdir: %v", err) } - defer os.RemoveAll(tmpDir) certPath := filepath.Join(tmpDir, "cert") keyPath := filepath.Join(tmpDir, "key") @@ -58,16 +63,26 @@ func TestGenerateCACert(t *testing.T) { func TestGenerateSignedCert(t *testing.T) { tmpDir, err := ioutil.TempDir("", "") + defer func() { //clean up tempdir + err := os.RemoveAll(tmpDir) + if err != nil { + t.Errorf("failed to clean up temp folder %q", tmpDir) + } + }() if err != nil { t.Fatalf("Error generating tmpdir: %v", err) } - defer os.RemoveAll(tmpDir) signerTmpDir, err := ioutil.TempDir("", "") + defer func() { //clean up tempdir + err := os.RemoveAll(signerTmpDir) + if err != nil { + t.Errorf("failed to clean up temp folder %q", signerTmpDir) + } + }() if err != nil { t.Fatalf("Error generating signer tmpdir: %v", err) } - defer os.RemoveAll(signerTmpDir) validSignerCertPath := filepath.Join(signerTmpDir, "cert") validSignerKeyPath := filepath.Join(signerTmpDir, "key") diff --git a/site/assets/scss/_variables_project.scss 
b/site/assets/scss/_variables_project.scss index 768b6b8662..7bc0efdb22 100644 --- a/site/assets/scss/_variables_project.scss +++ b/site/assets/scss/_variables_project.scss @@ -40,7 +40,7 @@ body, p { } -h1,h2,h3,h4,h5 { +h1,h2,h3,h4,h5,.navbar-brand { font-family: 'Open Sans', sans-serif !important; } diff --git a/site/config.toml b/site/config.toml index 7848140bf2..70982d6560 100644 --- a/site/config.toml +++ b/site/config.toml @@ -29,10 +29,6 @@ pygmentsStyle = "tango" # First one is picked as the Twitter card image if not set on page. #images = ["images/project-illustration.png"] -# Configure how URLs look like per section. -[permalinks] -blog = "/:section/:year/:month/:day/:slug/" - [markup] [markup.highlight] codeFences = true @@ -76,7 +72,7 @@ weight = 1 [params] copyright = "The Kubernetes Authors -- " # The latest release of minikube -latest_release = "1.9.1" +latest_release = "1.9.2" privacy_policy = "" @@ -125,12 +121,12 @@ no = 'Sorry to hear that. Please How to develop minikube addons --- +## Testing Addon changes + +Build the minikube binary: + +```shell +make +``` + +Apply addon from your newly built minikube binary: + +```shell +./out/minikube addons enable +``` + ## Adding a New Addon To add a new addon to minikube the following steps are required: @@ -62,6 +76,3 @@ To add a new addon to minikube the following steps are required: }, false, "efk"), } ``` - -* Rebuild minikube using `make out/minikube`. This will put the addon's .yaml binary files into the minikube binary using go-bindata. -* Test addon using `minikube addons enable ` command to start service. diff --git a/site/content/en/docs/contrib/building/iso.md b/site/content/en/docs/contrib/building/iso.md index 3d5eabe70b..8fd818bd38 100644 --- a/site/content/en/docs/contrib/building/iso.md +++ b/site/content/en/docs/contrib/building/iso.md @@ -83,9 +83,9 @@ iso directory after they've been selected. 
To add your own package to the minikube ISO, create a package directory under `iso/minikube-iso/package`. This directory will require at least 3 files: -`.mk` - A Makefile describing how to download the source code and build the program -`.hash` - Checksums to verify the downloaded source code -`Config.in` - buildroot configuration. +`.mk` - A Makefile describing how to download the source code and build the program +`.hash` - Checksums to verify the downloaded source code +`Config.in` - buildroot configuration For a relatively simple example to start with, you may want to reference the `podman` package. diff --git a/site/content/en/docs/contrib/documentation.en.md b/site/content/en/docs/contrib/documentation.en.md index f2bab66420..23e8408c9d 100644 --- a/site/content/en/docs/contrib/documentation.en.md +++ b/site/content/en/docs/contrib/documentation.en.md @@ -3,6 +3,8 @@ linkTitle: "Documentation" title: "Contributing to minikube documentation" date: 2019-07-31 weight: 2 +aliases: + - /docs/contribution-guidelines/ --- minikube's documentation is in [Markdown](https://www.markdownguide.org/cheat-sheet/), and generated using the following tools: diff --git a/site/content/en/docs/contrib/testing.en.md b/site/content/en/docs/contrib/testing.en.md index bc8e02eae5..2f88cf99d9 100644 --- a/site/content/en/docs/contrib/testing.en.md +++ b/site/content/en/docs/contrib/testing.en.md @@ -16,8 +16,9 @@ make test ### Integration Tests -Integration tests are currently run manually. 
-To run them, build the binary and run the tests: +#### The basics + +From the minikube root directory, build the binary and run the tests: ```shell make integration @@ -29,6 +30,34 @@ You may find it useful to set various options to test only a particular test aga env TEST_ARGS="-minikube-start-args=--driver=hyperkit -test.run TestStartStop" make integration ``` +#### Quickly iterating on a single test + +Run a single test on an active cluster: + +```shell +make integration -e TEST_ARGS="-test.run TestFunctional/parallel/MountCmd --profile=minikube --cleanup=false" +``` + +WARNING: For this to work repeatedly, the test must be written so that it cleans up after itself. + +The `--cleanup=false` test arg ensures that the cluster will not be deleted after the test is run. + +See [main.go](https://github.com/kubernetes/minikube/blob/master/test/integration/main.go) for details. + +#### Disabling parallelism + +```shell +make integration -e TEST_ARGS="-test.parallel=1" +``` + +#### Testing philosophy + +- Tests should be so simple as to be correct by inspection +- Readers should need to read only the test body to understand the test +- Top-to-bottom readability is more important than code de-duplication + +Tests are typically read with a great air of skepticism, because chances are they are being read only when things are broken. + ### Conformance Tests These are Kubernetes tests that run against an arbitrary cluster and exercise a wide range of Kubernetes features. diff --git a/site/content/en/docs/contrib/triage.md b/site/content/en/docs/contrib/triage.md index 93b3c403e5..b3975e95cb 100644 --- a/site/content/en/docs/contrib/triage.md +++ b/site/content/en/docs/contrib/triage.md @@ -7,6 +7,11 @@ description: > How to triage issues in the minikube repo --- +Community triage takes place **every Wednesday** from **11AM-12PM PST**. +Zoom link: https://zoom.us/j/5042173647 + +All community members are welcome and encouraged to join and help us triage minikube! 
+ Triage is an important part of maintaining the health of the minikube repo. A well organized repo allows maintainers to prioritize feature requests, fix bugs, and respond to users facing difficulty with the tool as quickly as possible. diff --git a/site/content/en/docs/drivers/docker.md b/site/content/en/docs/drivers/docker.md index 248428753d..ad5949d0ce 100644 --- a/site/content/en/docs/drivers/docker.md +++ b/site/content/en/docs/drivers/docker.md @@ -27,11 +27,14 @@ The Docker driver allows you to install Kubernetes into an existing Docker insta `sudo mkdir /sys/fs/cgroup/systemd && sudo mount -t cgroup -o none,name=systemd cgroup /sys/fs/cgroup/systemd`. -- Addon 'registry' for mac and windows is not supported yet and it is [a work in progress](https://github.com/kubernetes/minikube/issues/7535). - - ## Troubleshooting - On macOS or Windows, you may need to restart Docker for Desktop if a command gets hung + - Run `--alsologtostderr -v=1` for extra debugging information + +- On Linux, if you want to run MySQL pod, you need to disable AppArmor for mysql profile + + If your docker has [AppArmor](https://wiki.ubuntu.com/AppArmor) enabled, running mysql in privileged mode with docker driver will have the issue [#7401](https://github.com/kubernetes/minikube/issues/7401). + There is a workaround - see [moby/moby#7512](https://github.com/moby/moby/issues/7512#issuecomment-61787845). 
diff --git a/site/content/en/docs/drivers/includes/none_usage.inc b/site/content/en/docs/drivers/includes/none_usage.inc index f3f915a076..fd5c7d55ba 100644 --- a/site/content/en/docs/drivers/includes/none_usage.inc +++ b/site/content/en/docs/drivers/includes/none_usage.inc @@ -2,6 +2,8 @@ VM running a systemd-based Linux distribution ([see #2704](https://github.com/kubernetes/minikube/issues/2704)) +kubeadm requirements listed here - https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/ + ## Usage The none driver requires minikube to be run as root, until [#3760](https://github.com/kubernetes/minikube/issues/3760) can be addressed. diff --git a/site/content/en/docs/drivers/vmware.md b/site/content/en/docs/drivers/vmware.md index 89403bd4b3..7351904e4e 100644 --- a/site/content/en/docs/drivers/vmware.md +++ b/site/content/en/docs/drivers/vmware.md @@ -23,7 +23,7 @@ No documentation is available yet. ## Issues -* [Full list of open 'vmware' driver issues](https://github.com/kubernetes/minikube/labels/co%2Fvmware) +* [Full list of open 'vmware-driver' issues](https://github.com/kubernetes/minikube/labels/co%2Fvmware-driver) ## Troubleshooting diff --git a/site/content/en/docs/handbook/config.md b/site/content/en/docs/handbook/config.md index 3578b27ab2..8d6e2995de 100644 --- a/site/content/en/docs/handbook/config.md +++ b/site/content/en/docs/handbook/config.md @@ -93,10 +93,10 @@ The default container runtime in minikube is Docker. 
You can select it explicitl minikube start --container-runtime=docker ``` -You can also select: +Other options available are: -* *[containerd](https://github.com/containerd/containerd): -* `cri-o`: [CRI-O](https://github.com/kubernetes-sigs/cri-o): +* [containerd](https://github.com/containerd/containerd) +* [crio](https://github.com/kubernetes-sigs/cri-o) ## Environment variables diff --git a/site/content/en/docs/handbook/pushing.md b/site/content/en/docs/handbook/pushing.md index 32c58edf2f..7a0646c7b0 100644 --- a/site/content/en/docs/handbook/pushing.md +++ b/site/content/en/docs/handbook/pushing.md @@ -15,13 +15,13 @@ The best method to push your image to minikube depends on the container-runtime Here is a comparison table to help you choose: -| Method | Supported Runtimes | Issues | Performance | +| Method | Supported Runtimes | | Performance | |--- |--- |--- |--- |--- | -| [docker-env command](/docs/handbook/pushing/#1pushing-directly-to-the-in-cluster-docker-daemon-docker-env) | only docker | | good | -| [podman-env command](/docs/handbook/pushing/#3-pushing-directly-to-in-cluster-crio-podman-env) | only cri-o | | good | -| [cache add command](/pushing/#2-push-images-using-cache-command) | all | | ok | -| [registry addon](/docs/handbook/pushing/#4-pushing-to-an-in-cluster-using-registry-addon) | all | work in progress for [docker on mac](https://github.com/kubernetes/minikube/issues/7535) | ok | -| [minikube ssh](/docs/handbook/pushing/#5-building-images-inside-of-minikube-using-ssh) | all | | best | +| [docker-env command](/docs/handbook/pushing/#1pushing-directly-to-the-in-cluster-docker-daemon-docker-env) | only docker | good | +| [podman-env command](/docs/handbook/pushing/#3-pushing-directly-to-in-cluster-crio-podman-env) | only cri-o | good | +| [cache add command](/pushing/#2-push-images-using-cache-command) | all | ok | +| [registry addon](/docs/handbook/pushing/#4-pushing-to-an-in-cluster-using-registry-addon) | all | ok | +| [minikube 
ssh](/docs/handbook/pushing/#5-building-images-inside-of-minikube-using-ssh) | all | best | * note1 : the default container-runtime on minikube is 'docker'. diff --git a/site/content/en/docs/handbook/troubleshooting.md b/site/content/en/docs/handbook/troubleshooting.md index 0f8e4a1432..930daa4674 100644 --- a/site/content/en/docs/handbook/troubleshooting.md +++ b/site/content/en/docs/handbook/troubleshooting.md @@ -7,18 +7,15 @@ description: > ## Enabling debug logs -To debug issues with minikube (not *Kubernetes* but **minikube** itself), you can use the `-v` flag to see debug level info. The specified values for `-v` will do the following (the values are all encompassing in that higher values will give you all lower value outputs as well): +Pass `--alsologtostderr` to minikube commands to see detailed log output. To increase the log verbosity, you can use: -* `--v=0` will output **INFO** level logs -* `--v=1` will output **WARNING** level logs -* `--v=2` will output **ERROR** level logs - -* `--v=3` will output *libmachine* logging -* `--v=7` will output *libmachine --debug* level logging +* `-v=1`: verbose messages +* `-v=2`: really verbose messages +* `-v=8`: more log messages than you can possibly handle. Example: -`minikube start --v=7` will start minikube and output all the important debug logs to stdout. +`minikube start --alsologtostderr --v=2` will start minikube and output all the important debug logs to stderr. 
## Gathering VM logs diff --git a/site/content/en/docs/handbook/vpn_and_proxy.md b/site/content/en/docs/handbook/vpn_and_proxy.md index 08c8e1c5ff..3d430b10b4 100644 --- a/site/content/en/docs/handbook/vpn_and_proxy.md +++ b/site/content/en/docs/handbook/vpn_and_proxy.md @@ -1,8 +1,11 @@ --- -title: "Proxies & VPN's" +title: "Proxies and VPNs" weight: 6 description: > How to use minikube with a VPN or HTTP/HTTPS Proxy +aliases: + - /docs/reference/networking/vpn + - /docs/reference/networking/proxy --- minikube requires access to the internet via HTTP, HTTPS, and DNS protocols. diff --git a/site/content/en/docs/start/_index.md b/site/content/en/docs/start/_index.md index df077d6a8b..a6a991d86c 100644 --- a/site/content/en/docs/start/_index.md +++ b/site/content/en/docs/start/_index.md @@ -115,32 +115,46 @@ kubectl create deployment hello-minikube --image=k8s.gcr.io/echoserver:1.4 kubectl expose deployment hello-minikube --type=NodePort --port=8080 ``` -Find your cluster IP: +It may take a moment, but your deployment will soon show up when you run: ```shell -minikube ip +kubectl get services hello-minikube ``` -Either navigate to <your ip>:8080 in your web browser, or let minikube do it for you: +The easiest way to access this service is to let minikube launch a web browser for you: ```shell minikube service hello-minikube ``` -To access a LoadBalancer application, use the "minikube tunnel" feature. Here is an example deployment: +Alternatively, use kubectl to forward the port: + +```shell +kubectl port-forward service/hello-minikube 7080:8080 +``` + +Tada! Your application is now available at [http://localhost:7080/](http://localhost:7080/) + +### LoadBalancer deployments + +To access a LoadBalancer deployment, use the "minikube tunnel" command. 
Here is an example deployment: ```shell kubectl create deployment balanced --image=k8s.gcr.io/echoserver:1.4 -kubectl expose deployment balanced --type=LoadBalancer --port=8081 +kubectl expose deployment balanced --type=LoadBalancer --port=8000 ``` -In another window, start the tunnel to create a routable IP for the deployment: +In another window, start the tunnel to create a routable IP for the 'balanced' deployment: ```shell minikube tunnel ``` -Access the application using the "service" command, or your web browser. If you are using macOS, minikube will also forward DNS requests for you: [http://balanced.default.svc.cluster.local:8081/](http://balanced.default.svc.cluster.local:8081/) +To find the routable IP, run this command and examine the `EXTERNAL-IP` column: + +`kubectl get services balanced` + +Your deployment is now available at <EXTERNAL-IP>:8000

5Manage your cluster

@@ -187,4 +201,4 @@ minikube delete --all * [Community-contributed tutorials]({{}}) * [minikube command reference]({{}}) * [Contributors guide]({{}}) -* Take our [fast 5-question survey](https://forms.gle/Gg3hG5ZySw8c1C24A) to share your thoughts 🙏 \ No newline at end of file +* Take our [fast 5-question survey](https://forms.gle/Gg3hG5ZySw8c1C24A) to share your thoughts 🙏 diff --git a/site/content/en/docs/tutorials/multi_node.md b/site/content/en/docs/tutorials/multi_node.md index 88ec4bfd5f..5a4a5c05c7 100644 --- a/site/content/en/docs/tutorials/multi_node.md +++ b/site/content/en/docs/tutorials/multi_node.md @@ -21,12 +21,13 @@ date: 2019-11-24 minikube start --nodes 2 -p multinode-demo --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.244.0.0/16 😄 [multinode-demo] minikube v1.9.2 on Darwin 10.14.6 ✨ Automatically selected the hyperkit driver -👍 Starting control plane node m01 in cluster multinode-demo +👍 Starting control plane node multinode-demo in cluster multinode-demo 🔥 Creating hyperkit VM (CPUs=2, Memory=4000MB, Disk=20000MB) ... 🐳 Preparing Kubernetes v1.18.0 on Docker 19.03.8 ... + ▪ kubeadm.pod-network-cidr=10.244.0.0/16 🌟 Enabling addons: default-storageclass, storage-provisioner -👍 Starting node m02 in cluster multinode-demo +👍 Starting node multinode-demo-m02 in cluster multinode-demo 🔥 Creating hyperkit VM (CPUs=2, Memory=4000MB, Disk=20000MB) ... 🌐 Found network options: ▪ NO_PROXY=192.168.64.213 @@ -42,6 +43,22 @@ multinode-demo Ready master 9m58s v1.18.0 multinode-demo-m02 Ready 9m5s v1.18.0 ``` +NOTE: You can also check the status of your nodes: +``` +$ minikube status +multinode-demo +type: Control Plane +host: Running +kubelet: Running +apiserver: Running +kubeconfig: Configured + +multinode-demo-m02 +type: Worker +host: Running +kubelet: Running +``` + - Install a CNI (e.g. flannel): NOTE: This currently needs to be done manually after the apiserver is running, the multi-node feature is still experimental as of 1.9.2. 
``` diff --git a/site/layouts/community/list.html b/site/layouts/community/list.html new file mode 100644 index 0000000000..d66a50ed69 --- /dev/null +++ b/site/layouts/community/list.html @@ -0,0 +1,19 @@ +{{ define "main" }} + +
+
+
+ +

Join the {{ .Site.Title }} community

+ +

{{ .Site.Title }} is an open source project that anyone in the community can use, improve, and enjoy. We'd love you to join us! Here's a few ways to find out what's happening and get involved. + +

+
+{{ partial "community_links.html" . }} + +
+{{ .Content }} +
+ +{{ end }} diff --git a/site/layouts/partials/community-links.html b/site/layouts/partials/community_links.html similarity index 73% rename from site/layouts/partials/community-links.html rename to site/layouts/partials/community_links.html index 409b78a16e..06d94af178 100644 --- a/site/layouts/partials/community-links.html +++ b/site/layouts/partials/community_links.html @@ -10,11 +10,16 @@

Develop and Contribute

+

minikube is a Kubernetes #sig-cluster-lifecycle project.

If you want to get more involved by contributing to {{ .Site.Title }}, join us here: {{ with index $links "developer"}} {{ template "community-links-list" . }} {{ end }} -

You can find out how to contribute to these docs in our Contributing Guide. +

You can find out how to contribute to these docs in our Contribution Guidelines. +

Join our meetings

+{{ with index $links "meetings"}} +{{ template "community-links-list" . }} +{{ end }}
diff --git a/test/integration/README.md b/test/integration/README.md index 7af031f6ea..8a62883dde 100644 --- a/test/integration/README.md +++ b/test/integration/README.md @@ -1,29 +1 @@ -# Integration tests - -## The basics - -To run all tests from the minikube root directory: - -`make integration` - -## Quickly iterating on a single test - -Run a single test on an active cluster: - -`make integration -e TEST_ARGS="-test.run TestFunctional/parallel/MountCmd --profile=minikube --cleanup=false"` - -WARNING: For this to work repeatedly, the test must be written so that it cleans up after itself. - -See `main.go` for details. - -## Disabling parallelism - -`make integration -e TEST_ARGS="-test.parallel=1"` - -## Testing philosophy - -- Tests should be so simple as to be correct by inspection -- Readers should need to read only the test body to understand the test -- Top-to-bottom readability is more important than code de-duplication - -Tests are typically read with a great air of skepticism, because chances are they are being read only when things are broken. 
+This document has moved to https://minikube.sigs.k8s.io/docs/contrib/testing/#integration-tests diff --git a/test/integration/aaa_download_only_test.go b/test/integration/aaa_download_only_test.go index 7d198b7634..9d5ec28443 100644 --- a/test/integration/aaa_download_only_test.go +++ b/test/integration/aaa_download_only_test.go @@ -154,6 +154,7 @@ func TestDownloadOnlyKic(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), Minutes(15)) defer Cleanup(t, profile, cancel) + // TODO: #7795 add containerd to download only too cRuntime := "docker" args := []string{"start", "--download-only", "-p", profile, "--force", "--alsologtostderr"} diff --git a/test/integration/cert_options_test.go b/test/integration/cert_options_test.go index efa1cb4262..bd4318dbb8 100644 --- a/test/integration/cert_options_test.go +++ b/test/integration/cert_options_test.go @@ -21,6 +21,7 @@ package integration import ( "context" "os/exec" + "strings" "testing" ) @@ -35,7 +36,7 @@ func TestCertOptions(t *testing.T) { defer CleanupWithLogs(t, profile, cancel) // Use the most verbose logging for the simplest test. If it fails, something is very wrong. - args := append([]string{"start", "-p", profile, "--memory=1900", "--apiserver-ips=127.0.0.1,192.168.15.15", "--apiserver-names=localhost,www.google.com", "--apiserver-port=8555"}, StartArgs()...) + args := append([]string{"start", "-p", profile, "--memory=1900", "--apiserver-ips=127.0.0.1", "--apiserver-ips=192.168.15.15", "--apiserver-names=localhost", "--apiserver-names=www.google.com", "--apiserver-port=8555"}, StartArgs()...) 
// We can safely override --apiserver-name with if NeedsPortForward() { @@ -47,10 +48,33 @@ func TestCertOptions(t *testing.T) { t.Errorf("failed to start minikube with args: %q : %v", rr.Command(), err) } - // test that file written from host was read in by the pod via cat /mount-9p/written-by-host; - rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "version")) + // verify that the alternate names/ips are included in the apiserver cert + // in minikube vm, run - openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt + // to inspect the apiserver cert + + // can filter further with '-certopt no_subject,no_header,no_version,no_serial,no_signame,no_validity,no_issuer,no_pubkey,no_sigdump,no_aux' + apiserverCertCmd := "openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt" + rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", apiserverCertCmd)) if err != nil { - t.Errorf("failed to get kubectl version. args %q : %v", rr.Command(), err) + t.Errorf("failed to read apiserver cert inside minikube. args %q: %v", rr.Command(), err) + } + + extraNamesIps := [4]string{"127.0.0.1", "192.168.15.15", "localhost", "www.google.com"} + + for _, eni := range extraNamesIps { + if !strings.Contains(rr.Stdout.String(), eni) { + t.Errorf("apiserver cert does not include %s in SAN.", eni) + } + } + + // verify that the apiserver is serving on port 8555 + + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "config", "view")) + if err != nil { + t.Errorf("failed to get kubectl config. args %q : %v", rr.Command(), err) + } + if !strings.Contains(rr.Stdout.String(), "8555") { + t.Errorf("apiserver server port incorrect. 
Output of 'kubectl config view' = %q", rr.Output()) } } diff --git a/test/integration/error_spam_test.go b/test/integration/error_spam_test.go index 98a4fa1508..d144da8945 100644 --- a/test/integration/error_spam_test.go +++ b/test/integration/error_spam_test.go @@ -21,10 +21,22 @@ package integration import ( "context" "os/exec" + "regexp" "strings" "testing" ) +// stderrWhitelist are regular expressions acceptable to find in normal stderr +var stderrWhitelist = []string{ + // kubectl out of date warning + `kubectl`, + // slow docker warning + `slow|long time|Restarting the docker service may improve`, +} + +// stderrWhitelistRe combines rootCauses into a single regex +var stderrWhitelistRe = regexp.MustCompile(strings.Join(stderrWhitelist, "|")) + // TestErrorSpam asserts that there are no errors displayed func TestErrorSpam(t *testing.T) { if NoneDriver() { @@ -41,20 +53,22 @@ func TestErrorSpam(t *testing.T) { rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("failed to start minikube with args: %q : %v", rr.Command(), err) + t.Errorf("%q failed: %v", rr.Command(), err) } for _, line := range strings.Split(rr.Stderr.String(), "\n") { if strings.HasPrefix(line, "E") { - t.Errorf("unexpected error log in stderr: %q", line) + t.Errorf("unexpected error log: %q", line) continue } - if strings.Contains(line, "kubectl") || strings.Contains(line, "slow") || strings.Contains(line, "long time") { + if stderrWhitelistRe.MatchString(line) { + t.Logf("acceptable stderr: %q", line) continue } + if len(strings.TrimSpace(line)) > 0 { - t.Errorf("unexpected stderr line: %q", line) + t.Errorf("unexpected stderr: %q", line) } } @@ -62,7 +76,7 @@ func TestErrorSpam(t *testing.T) { keywords := []string{"error", "fail", "warning", "conflict"} for _, keyword := range keywords { if strings.Contains(line, keyword) { - t.Errorf("unexpected %q in stdout line: %q", keyword, line) + t.Errorf("unexpected %q in stdout: %q", keyword, line) } } } diff 
--git a/test/integration/fn_mount_cmd.go b/test/integration/fn_mount_cmd.go index 915262a833..f6e218d8a4 100644 --- a/test/integration/fn_mount_cmd.go +++ b/test/integration/fn_mount_cmd.go @@ -52,6 +52,12 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { } tempDir, err := ioutil.TempDir("", "mounttest") + defer func() { //clean up tempdir + err := os.RemoveAll(tempDir) + if err != nil { + t.Errorf("failed to clean up %q temp folder.", tempDir) + } + }() if err != nil { t.Fatalf("Unexpected error while creating tempDir: %v", err) } diff --git a/test/integration/fn_tunnel_cmd.go b/test/integration/fn_tunnel_cmd.go index 8f43a9a5fd..8a1f0757e8 100644 --- a/test/integration/fn_tunnel_cmd.go +++ b/test/integration/fn_tunnel_cmd.go @@ -41,9 +41,8 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { ctx, cancel := context.WithTimeout(ctx, Minutes(20)) defer cancel() - if runtime.GOOS != "windows" { - // Otherwise minikube fails waiting for a password. 
- if err := exec.Command("sudo", "-n", "route").Run(); err != nil { + if !KicDriver() && runtime.GOOS != "windows" { + if err := exec.Command("sudo", "-n", "ifconfig").Run(); err != nil { t.Skipf("password required to execute 'route', skipping testTunnel: %v", err) } } @@ -59,7 +58,7 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { } // Start the tunnel - args := []string{"-p", profile, "tunnel", "--alsologtostderr", "-v=1"} + args := []string{"-p", profile, "tunnel", "--alsologtostderr"} ss, err := Start(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Errorf("failed to start a tunnel: args %q: %v", args, err) @@ -80,14 +79,14 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { } // Wait until the nginx-svc has a loadbalancer ingress IP - nginxIP := "" - err = wait.PollImmediate(1*time.Second, Minutes(3), func() (bool, error) { + hostname := "" + err = wait.PollImmediate(5*time.Second, Minutes(3), func() (bool, error) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "svc", "nginx-svc", "-o", "jsonpath={.status.loadBalancer.ingress[0].ip}")) if err != nil { return false, err } if len(rr.Stdout.String()) > 0 { - nginxIP = rr.Stdout.String() + hostname = rr.Stdout.String() return true, nil } return false, nil @@ -103,9 +102,11 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { } got := []byte{} + url := fmt.Sprintf("http://%s", hostname) + fetch := func() error { h := &http.Client{Timeout: time.Second * 10} - resp, err := h.Get(fmt.Sprintf("http://%s", nginxIP)) + resp, err := h.Get(url) if err != nil { return &retry.RetriableError{Err: err} } @@ -119,12 +120,35 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { } return nil } - if err = retry.Expo(fetch, time.Millisecond*500, Minutes(2), 13); err != nil { - t.Errorf("failed to hit nginx at %q: %v", nginxIP, err) + if err = retry.Expo(fetch, 
3*time.Second, Minutes(2), 13); err != nil { + t.Errorf("failed to hit nginx at %q: %v", url, err) } want := "Welcome to nginx!" - if !strings.Contains(string(got), want) { + if strings.Contains(string(got), want) { + t.Logf("tunnel at %s is working!", url) + } else { t.Errorf("expected body to contain %q, but got *%q*", want, got) } + + // Not all platforms support DNS forwarding + if runtime.GOOS != "darwin" { + return + } + + // use FQDN to avoid extra DNS query lookup + url = "http://nginx-svc.default.svc.cluster.local." + if err = retry.Expo(fetch, 3*time.Second, Seconds(30), 10); err != nil { + t.Errorf("failed to hit nginx with DNS forwarded %q: %v", url, err) + // debug more information for: https://github.com/kubernetes/minikube/issues/7809 + clusterLogs(t, profile) + } + + want = "Welcome to nginx!" + if strings.Contains(string(got), want) { + t.Logf("tunnel at %s is working!", url) + } else { + t.Errorf("expected body to contain %q, but got *%q*", want, got) + } + } diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index 5f1ef0124b..2f41ea89ef 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -670,7 +670,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { t.Fatalf("failed to get service url. args %q : %v", rr.Command(), err) } if rr.Stderr.String() != "" { - t.Errorf("expected stderr to be empty but got *%q*", rr.Stderr) + t.Errorf("expected stderr to be empty but got *%q* . 
args %q", rr.Stderr, rr.Command()) } endpoint := strings.TrimSpace(rr.Stdout.String()) diff --git a/test/integration/main.go b/test/integration/main.go index 2ef2d90731..fb706aebdd 100644 --- a/test/integration/main.go +++ b/test/integration/main.go @@ -69,9 +69,19 @@ func HyperVDriver() bool { return strings.Contains(*startArgs, "--driver=hyperv") || strings.Contains(*startArgs, "--vm-driver=hyperv") } +// DockerDriver returns whether or not this test is using the docker or podman driver +func DockerDriver() bool { + return strings.Contains(*startArgs, "--driver=docker") || strings.Contains(*startArgs, "--vm-driver=docker") +} + +// PodmanDriver returns whether or not this test is using the docker or podman driver +func PodmanDriver() bool { + return strings.Contains(*startArgs, "--vm-driver=podman") || strings.Contains(*startArgs, "driver=podman") +} + // KicDriver returns whether or not this test is using the docker or podman driver func KicDriver() bool { - return strings.Contains(*startArgs, "--driver=docker") || strings.Contains(*startArgs, "--vm-driver=docker") || strings.Contains(*startArgs, "--vm-driver=podman") || strings.Contains(*startArgs, "driver=podman") + return DockerDriver() || PodmanDriver() } // NeedsPortForward returns access to endpoints with this driver needs port forwarding diff --git a/test/integration/multinode_test.go b/test/integration/multinode_test.go new file mode 100644 index 0000000000..5a9cc4f16a --- /dev/null +++ b/test/integration/multinode_test.go @@ -0,0 +1,193 @@ +// +build integration + +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "context" + "os/exec" + "strings" + "testing" +) + +func TestMultiNode(t *testing.T) { + if NoneDriver() { + t.Skip("none driver does not support multinode") + } + MaybeParallel(t) + + type validatorFunc func(context.Context, *testing.T, string) + profile := UniqueProfileName("multinode") + ctx, cancel := context.WithTimeout(context.Background(), Minutes(30)) + defer CleanupWithLogs(t, profile, cancel) + + t.Run("serial", func(t *testing.T) { + tests := []struct { + name string + validator validatorFunc + }{ + {"FreshStart2Nodes", validateMultiNodeStart}, + {"AddNode", validateAddNodeToMultiNode}, + {"StopNode", validateStopRunningNode}, + {"StartAfterStop", validateStartNodeAfterStop}, + {"DeleteNode", validateDeleteNodeFromMultiNode}, + } + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + tc.validator(ctx, t, profile) + }) + } + }) +} + +func validateMultiNodeStart(ctx context.Context, t *testing.T, profile string) { + // Start a 2 node cluster with the --nodes param + startArgs := append([]string{"start", "-p", profile, "--wait=true", "--nodes=2"}, StartArgs()...) + rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) + if err != nil { + t.Fatalf("failed to start cluster. args %q : %v", rr.Command(), err) + } + + // Make sure minikube status shows 2 nodes + rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) + if err != nil { + t.Fatalf("failed to run minikube status. 
args %q : %v", rr.Command(), err) + } + + if strings.Count(rr.Stdout.String(), "host: Running") != 2 { + t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String()) + } + + if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 { + t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String()) + } + +} + +func validateAddNodeToMultiNode(ctx context.Context, t *testing.T, profile string) { + // Add a node to the current cluster + addArgs := []string{"node", "add", "-p", profile, "-v", "3", "--alsologtostderr"} + rr, err := Run(t, exec.CommandContext(ctx, Target(), addArgs...)) + if err != nil { + t.Fatalf("failed to add node to current cluster. args %q : %v", rr.Command(), err) + } + + // Make sure minikube status shows 3 nodes + rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) + if err != nil { + t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) + } + + if strings.Count(rr.Stdout.String(), "host: Running") != 3 { + t.Errorf("status says all hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String()) + } + + if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 { + t.Errorf("status says all kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String()) + } +} + +func validateStopRunningNode(ctx context.Context, t *testing.T, profile string) { + // Names are autogenerated using the node.Name() function + name := "m03" + + // Run minikube node stop on that node + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "stop", name)) + if err != nil { + t.Errorf("node stop returned an error. 
args %q: %v", rr.Command(), err) + } + + // Run status again to see the stopped host + rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) + // Exit code 7 means one host is stopped, which we are expecting + if err != nil && rr.ExitCode != 7 { + t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) + } + + // Make sure minikube status shows 2 running nodes and 1 stopped one + rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) + if err != nil && rr.ExitCode != 7 { + t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) + } + + if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 { + t.Errorf("incorrect number of running kubelets: args %q: %v", rr.Command(), rr.Stdout.String()) + } + + if strings.Count(rr.Stdout.String(), "host: Stopped") != 1 { + t.Errorf("incorrect number of stopped hosts: args %q: %v", rr.Command(), rr.Stdout.String()) + } + + if strings.Count(rr.Stdout.String(), "kubelet: Stopped") != 1 { + t.Errorf("incorrect number of stopped kubelets: args %q: %v", rr.Command(), rr.Stdout.String()) + } +} + +func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile string) { + // TODO (#7496): remove skip once restarts work + t.Skip("Restarting nodes is broken :(") + + // Grab the stopped node + name := "m03" + + // Start the node back up + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", name)) + if err != nil { + t.Errorf("node start returned an error. args %q: %v", rr.Command(), err) + } + + // Make sure minikube status shows 3 running hosts + rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) + if err != nil { + t.Fatalf("failed to run minikube status. 
args %q : %v", rr.Command(), err) + } + + if strings.Count(rr.Stdout.String(), "host: Running") != 3 { + t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String()) + } + + if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 { + t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String()) + } +} + +func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile string) { + name := "m03" + + // Start the node back up + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "delete", name)) + if err != nil { + t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err) + } + + // Make sure status is back down to 2 hosts + rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) + if err != nil { + t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) + } + + if strings.Count(rr.Stdout.String(), "host: Running") != 2 { + t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String()) + } + + if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 { + t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String()) + } + +} diff --git a/test/integration/pause_test.go b/test/integration/pause_test.go new file mode 100644 index 0000000000..91ab1a7ca3 --- /dev/null +++ b/test/integration/pause_test.go @@ -0,0 +1,148 @@ +// +build integration + +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "context" + "encoding/json" + "os/exec" + "strings" + "testing" +) + +func TestPause(t *testing.T) { + MaybeParallel(t) + + type validateFunc func(context.Context, *testing.T, string) + profile := UniqueProfileName("pause") + ctx, cancel := context.WithTimeout(context.Background(), Minutes(30)) + defer CleanupWithLogs(t, profile, cancel) + + // Serial tests + t.Run("serial", func(t *testing.T) { + tests := []struct { + name string + validator validateFunc + }{ + {"Start", validateFreshStart}, + {"SecondStartNoReset", validateStartNoReset}, + {"Pause", validatePause}, + {"Unpause", validateUnpause}, + {"PauseAgain", validatePause}, + {"DeletePaused", validateDelete}, + {"VerifyDeletedResources", validateVerifyDeleted}, + } + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + tc.validator(ctx, t, profile) + }) + } + }) +} + +func validateFreshStart(ctx context.Context, t *testing.T, profile string) { + args := append([]string{"start", "-p", profile, "--memory=1800", "--install-addons=false", "--wait=all"}, StartArgs()...) 
+ rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) + if err != nil { + t.Fatalf("failed to start minikube with args: %q : %v", rr.Command(), err) + } +} + +// validateStartNoReset validates that starting a running cluster won't invoke a reset +func validateStartNoReset(ctx context.Context, t *testing.T, profile string) { + args := []string{"start", "-p", profile, "--alsologtostderr", "-v=5"} + rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) + if err != nil { + t.Fatalf("failed to second start a running minikube with args: %q : %v", rr.Command(), err) + } + if !NoneDriver() { + softLog := "The running cluster does not need a reset" + if !strings.Contains(rr.Output(), softLog) { + t.Errorf("expected the second start log outputs to include %q but got: %s", softLog, rr.Output()) + } + } + +} + +func validatePause(ctx context.Context, t *testing.T, profile string) { + args := []string{"pause", "-p", profile, "--alsologtostderr", "-v=5"} + rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) + if err != nil { + t.Errorf("failed to pause minikube with args: %q : %v", rr.Command(), err) + } +} + +func validateUnpause(ctx context.Context, t *testing.T, profile string) { + args := []string{"unpause", "-p", profile, "--alsologtostderr", "-v=5"} + rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) + if err != nil { + t.Errorf("failed to unpause minikube with args: %q : %v", rr.Command(), err) + } +} + +func validateDelete(ctx context.Context, t *testing.T, profile string) { + args := []string{"delete", "-p", profile, "--alsologtostderr", "-v=5"} + rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) + if err != nil { + t.Errorf("failed to delete minikube with args: %q : %v", rr.Command(), err) + } +} + +// make sure no left over left after deleting a profile such as containers or volumes +func validateVerifyDeleted(ctx context.Context, t *testing.T, profile string) { + rr, err := Run(t, exec.CommandContext(ctx, 
Target(), "profile", "list", "--output", "json")) + if err != nil { + t.Errorf("failed to list profiles with json format after it was deleted. args %q: %v", rr.Command(), err) + } + + var jsonObject map[string][]map[string]interface{} + if err := json.Unmarshal(rr.Stdout.Bytes(), &jsonObject); err != nil { + t.Errorf("failed to decode json from profile list: args %q: %v", rr.Command(), err) + } + validProfiles := jsonObject["valid"] + profileExists := false + for _, profileObject := range validProfiles { + if profileObject["Name"] == profile { + profileExists = true + break + } + } + if profileExists { + t.Errorf("expected the deleted profile %q not to show up in profile list but it does! output: %s . args: %q", profile, rr.Stdout.String(), rr.Command()) + } + + if KicDriver() { + bin := "docker" + if PodmanDriver() { + bin = "podman" + } + rr, err := Run(t, exec.CommandContext(ctx, bin, "ps", "-a")) + if err == nil && strings.Contains(rr.Output(), profile) { + t.Errorf("expected container %q not to exist in output of %s but it does output: %s.", profile, rr.Command(), rr.Output()) + } + + rr, err = Run(t, exec.CommandContext(ctx, bin, "volume", "inspect", profile)) + if err == nil { + t.Errorf("expected to see error and volume %q to not exist after deletion but got no error and this output: %s", rr.Command(), rr.Output()) + } + + } + +} diff --git a/test/integration/preload_test.go b/test/integration/preload_test.go new file mode 100644 index 0000000000..1763eee3a8 --- /dev/null +++ b/test/integration/preload_test.go @@ -0,0 +1,71 @@ +// +build integration + +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "context" + "fmt" + "os/exec" + "strings" + "testing" +) + +func TestPreload(t *testing.T) { + if NoneDriver() { + t.Skipf("skipping %s - incompatible with none driver", t.Name()) + } + + profile := UniqueProfileName("test-preload") + ctx, cancel := context.WithTimeout(context.Background(), Minutes(40)) + defer CleanupWithLogs(t, profile, cancel) + + startArgs := []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true", "--preload=false"} + startArgs = append(startArgs, StartArgs()...) + k8sVersion := "v1.17.0" + startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", k8sVersion)) + + rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) + if err != nil { + t.Fatalf("%s failed: %v", rr.Command(), err) + } + + // Now, pull the busybox image into the VMs docker daemon + image := "busybox" + rr, err = Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "--", "docker", "pull", image)) + if err != nil { + t.Fatalf("%s failed: %v", rr.Command(), err) + } + + // Restart minikube with v1.17.3, which has a preloaded tarball + startArgs = []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true"} + startArgs = append(startArgs, StartArgs()...) 
+ k8sVersion = "v1.17.3" + startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", k8sVersion)) + rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) + if err != nil { + t.Fatalf("%s failed: %v", rr.Command(), err) + } + rr, err = Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "--", "docker", "images")) + if err != nil { + t.Fatalf("%s failed: %v", rr.Command(), err) + } + if !strings.Contains(rr.Output(), image) { + t.Fatalf("Expected to find %s in output of `docker images`, instead got %s", image, rr.Output()) + } +} diff --git a/test/integration/start_stop_delete_test.go b/test/integration/start_stop_delete_test.go index 593019c06f..0b4a483ab2 100644 --- a/test/integration/start_stop_delete_test.go +++ b/test/integration/start_stop_delete_test.go @@ -44,7 +44,7 @@ func TestStartStop(t *testing.T) { version string args []string }{ - {"old-docker", constants.OldestKubernetesVersion, []string{ + {"old-k8s-version", constants.OldestKubernetesVersion, []string{ // default is the network created by libvirt, if we change the name minikube won't boot // because the given network doesn't exist "--kvm-network=default", @@ -174,50 +174,6 @@ func TestStartStop(t *testing.T) { }) } -func TestStartStopWithPreload(t *testing.T) { - if NoneDriver() { - t.Skipf("skipping %s - incompatible with none driver", t.Name()) - } - - profile := UniqueProfileName("test-preload") - ctx, cancel := context.WithTimeout(context.Background(), Minutes(40)) - defer CleanupWithLogs(t, profile, cancel) - - startArgs := []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true", "--preload=false"} - startArgs = append(startArgs, StartArgs()...) 
- k8sVersion := "v1.17.0" - startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", k8sVersion)) - - rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) - if err != nil { - t.Fatalf("%s failed: %v", rr.Command(), err) - } - - // Now, pull the busybox image into the VMs docker daemon - image := "busybox" - rr, err = Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "--", "docker", "pull", image)) - if err != nil { - t.Fatalf("%s failed: %v", rr.Command(), err) - } - - // Restart minikube with v1.17.3, which has a preloaded tarball - startArgs = []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true"} - startArgs = append(startArgs, StartArgs()...) - k8sVersion = "v1.17.3" - startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", k8sVersion)) - rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) - if err != nil { - t.Fatalf("%s failed: %v", rr.Command(), err) - } - rr, err = Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "--", "docker", "images")) - if err != nil { - t.Fatalf("%s failed: %v", rr.Command(), err) - } - if !strings.Contains(rr.Output(), image) { - t.Fatalf("Expected to find %s in output of `docker images`, instead got %s", image, rr.Output()) - } -} - // testPodScheduling asserts that this configuration can schedule new pods func testPodScheduling(ctx context.Context, t *testing.T, profile string) { t.Helper() diff --git a/test/integration/util.go b/test/integration/util.go index f3d7c1916c..ebfd9ad3d2 100644 --- a/test/integration/util.go +++ b/test/integration/util.go @@ -56,5 +56,6 @@ func UniqueProfileName(prefix string) string { if NoneDriver() { return "minikube" } - return fmt.Sprintf("%s-%s-%d", prefix, time.Now().Format("20060102T150405.999999999"), os.Getpid()) + // example: prefix-20200413162239-3215 + return fmt.Sprintf("%s-%s-%d", prefix, time.Now().Format("20060102150405"), os.Getpid()) } diff --git 
a/translations/strings.txt b/translations/strings.txt new file mode 100644 index 0000000000..db29478557 --- /dev/null +++ b/translations/strings.txt @@ -0,0 +1,640 @@ +{ + "\"The '{{.minikube_addon}}' addon is disabled": "", + "\"{{.context}}\" context has been updated to point to {{.hostname}}:{{.port}}": "", + "\"{{.machineName}}\" does not exist, nothing to stop": "", + "\"{{.name}}\" profile does not exist, trying anyways.": "", + "'none' driver does not support 'minikube docker-env' command": "", + "'none' driver does not support 'minikube mount' command": "", + "'none' driver does not support 'minikube podman-env' command": "", + "'none' driver does not support 'minikube ssh' command": "", + "'{{.driver}}' driver reported an issue: {{.error}}": "", + "A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "", + "A firewall is blocking Docker the minikube VM from reaching the image repository. You may need to select --image-repository, or use a proxy.": "", + "A firewall is interfering with minikube's ability to make outgoing HTTPS requests. You may need to change the value of the HTTPS_PROXY environment variable.": "", + "A firewall is likely blocking minikube from reaching the internet. You may need to configure minikube to use a proxy.": "", + "A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "", + "A set of apiserver names which are used in the generated certificate for kubernetes. 
This can be used if you want to make the apiserver available from outside the machine": "", + "A set of key=value pairs that describe feature gates for alpha/experimental features.": "", + "Access the kubernetes dashboard running within the minikube cluster": "", + "Add an image to local cache.": "", + "Add machine IP to NO_PROXY environment variable": "", + "Add or delete an image from the local cache.": "", + "Adding node {{.name}} to cluster {{.cluster}}": "", + "Additional help topics": "", + "Additional mount options, such as cache=fscache": "", + "Adds a node to the given cluster config, and starts it.": "", + "Adds a node to the given cluster.": "", + "Advanced Commands:": "", + "Aliases": "", + "Allow user prompts for more information": "", + "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "", + "Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", + "Amount of time to wait for a service in seconds": "", + "Amount of time to wait for service in seconds": "", + "Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "", + "Another program is using a file required by minikube. If you are using Hyper-V, try stopping the minikube VM from within the Hyper-V manager": "", + "Automatically selected the {{.driver}} driver": "", + "Automatically selected the {{.driver}} driver. 
Other choices: {{.alternates}}": "", + "Available Commands": "", + "Basic Commands:": "", + "Because you are using docker driver on Mac, the terminal needs to be open to run it.": "", + "Bind Address: {{.Address}}": "", + "Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "", + "Cannot find directory {{.path}} for mount": "", + "Cannot use both --output and --format options": "", + "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "", + "Check that SELinux is disabled, and that the provided apiserver flags are valid": "", + "Check that minikube is running and that you have specified the correct namespace (-n flag) if required.": "", + "Check that the provided apiserver flags are valid, and that SELinux is disabled": "", + "Check that your --kubernetes-version has a leading 'v'. For example: 'v1.1.14'": "", + "Check your firewall rules for interference, and run 'virt-host-validate' to check for KVM configuration issues. If you are running minikube within a VM, consider using --driver=none": "", + "Choose a smaller value for --memory, such as 2000": "", + "Configuration and Management Commands:": "", + "Configure a default route on this Linux host, or use another --driver that does not require it": "", + "Configure an external network switch following the official documentation, then add `--hyperv-virtual-switch=\u003cswitch-name\u003e` to `minikube start`": "", + "Configures the addon w/ADDON_NAME within minikube (example: minikube addons configure registry-creds). 
For a list of available addons use: minikube addons list": "", + "Configuring local host environment ...": "", + "Confirm that you have a working internet connection and that your VM has not run out of resources by using: 'minikube logs'": "", + "Confirm that you have supplied the correct value to --hyperv-virtual-switch using the 'Get-VMSwitch' command": "", + "Could not process error from failed deletion": "", + "Could not process errors from failed deletion": "", + "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "", + "Creating mount {{.name}} ...": "", + "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB) ...": "", + "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", + "DEPRECATED, use `driver` instead.": "", + "Default group id used for the mount": "", + "Default user id used for the mount": "", + "Delete an image from the local cache.": "", + "Deletes a local kubernetes cluster": "", + "Deletes a local kubernetes cluster. This command deletes the VM, and removes all\nassociated files.": "", + "Deletes a node from a cluster.": "", + "Deleting \"{{.profile_name}}\" in {{.driver_name}} ...": "", + "Deleting node {{.name}} from cluster {{.cluster}}": "", + "Disable checking for the availability of hardware virtualization before the vm is started (virtualbox driver only)": "", + "Disable dynamic memory in your VM manager, or pass in a larger --memory value": "", + "Disables the addon w/ADDON_NAME within minikube (example: minikube addons disable dashboard). 
For a list of available addons use: minikube addons list": "", + "Disables the filesystem mounts provided by the hypervisors": "", + "Disk size allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", + "Display dashboard URL instead of opening a browser": "", + "Display the kubernetes addons URL in the CLI instead of opening it in the default browser": "", + "Display the kubernetes service URL in the CLI instead of opening it in the default browser": "", + "Display values currently set in the minikube config file": "", + "Display values currently set in the minikube config file.": "", + "Docker inside the VM is unavailable. Try running 'minikube delete' to reset the VM.": "", + "Docs have been saved at - {{.path}}": "", + "Documentation: {{.url}}": "", + "Done! kubectl is now configured to use \"{{.name}}\"": "", + "Download complete!": "", + "Downloading Kubernetes {{.version}} preload ...": "", + "Downloading VM boot image ...": "", + "Downloading driver {{.driver}}:": "", + "Due to {{.driver_name}} networking limitations on {{.os_name}}, {{.addon_name}} addon is not supported for this driver.\nAlternatively to use this addon you can use a vm-based driver:\n\n\t'minikube start --vm=true'\n\nTo track the update on this work in progress feature please check:\nhttps://github.com/kubernetes/minikube/issues/7332": "", + "ERROR creating `registry-creds-acr` secret": "", + "ERROR creating `registry-creds-dpr` secret": "", + "ERROR creating `registry-creds-ecr` secret: {{.error}}": "", + "ERROR creating `registry-creds-gcr` secret: {{.error}}": "", + "Either systemctl is not installed, or Docker is broken. Run 'sudo systemctl start docker' and 'journalctl -u docker'": "", + "Enable addons. 
see `minikube addons list` for a list of valid addon names.": "", + "Enable experimental NVIDIA GPU support in minikube": "", + "Enable host resolver for NAT DNS requests (virtualbox driver only)": "", + "Enable proxy for NAT DNS requests (virtualbox driver only)": "", + "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\\".": "", + "Enables the addon w/ADDON_NAME within minikube (example: minikube addons enable dashboard). For a list of available addons use: minikube addons list": "", + "Enabling '{{.name}}' returned an error: {{.error}}": "", + "Enabling addons: {{.addons}}": "", + "Enabling dashboard ...": "", + "Ensure that CRI-O is installed and healthy: Run 'sudo systemctl start crio' and 'journalctl -u crio'. Alternatively, use --container-runtime=docker": "", + "Ensure that Docker is installed and healthy: Run 'sudo systemctl start docker' and 'journalctl -u docker'. Alternatively, select another value for --driver": "", + "Ensure that the user listed in /etc/libvirt/qemu.conf has access to your home directory": "", + "Ensure that your value for HTTPS_PROXY points to an HTTPS proxy rather than an HTTP proxy": "", + "Environment variables to pass to the Docker daemon. 
(format: key=value)": "", + "Error creating minikube directory": "", + "Error creating view template": "", + "Error detecting shell": "", + "Error executing view template": "", + "Error finding port for mount": "", + "Error generating set output": "", + "Error generating unset output": "", + "Error getting cluster bootstrapper": "", + "Error getting cluster config": "", + "Error getting host": "", + "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "", + "Error getting primary control plane": "", + "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "", + "Error getting ssh client": "", + "Error getting the host IP address to use from within the VM": "", + "Error killing mount process": "", + "Error loading profile config: {{.error}}": "", + "Error opening service": "", + "Error parsing minikube version: {{.error}}": "", + "Error reading {{.path}}: {{.error}}": "", + "Error starting cluster": "", + "Error starting mount": "", + "Error while setting kubectl current context : {{.error}}": "", + "Error writing mount pid": "", + "Examples": "", + "Executing \"{{.command}}\" took an unusually long time: {{.duration}}": "", + "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "", + "Exiting": "", + "Exiting.": "", + "External Adapter on which external switch will be created if no external switch is found. 
(hyperv driver only)": "", + "Failed runtime": "", + "Failed to cache and load images": "", + "Failed to cache binaries": "", + "Failed to cache images": "", + "Failed to cache images to tar": "", + "Failed to cache kubectl": "", + "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}": "", + "Failed to check main repository and mirrors for images for images": "", + "Failed to delete cluster {{.name}}, proceeding with retry anyway.": "", + "Failed to delete cluster: {{.error}}": "", + "Failed to delete images": "", + "Failed to delete images from config": "", + "Failed to enable container runtime": "", + "Failed to get API Server URL": "", + "Failed to get bootstrapper": "", + "Failed to get command runner": "", + "Failed to get image map": "", + "Failed to get service URL: {{.error}}": "", + "Failed to kill mount process: {{.error}}": "", + "Failed to list cached images": "", + "Failed to reload cached images": "", + "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "", + "Failed to setup certs": "", + "Failed to start {{.driver}} {{.driver_type}}. 
\"{{.cmd}}\" may fix it: {{.error}}": "", + "Failed to stop node {{.name}}": "", + "Failed to update cluster": "", + "Failed to update config": "", + "Failed to validate '{{.driver}}' driver": "", + "Failed unmount: {{.error}}": "", + "File permissions used for the mount": "", + "Filter to use only VM Drivers": "", + "Flags": "", + "Follow": "", + "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "", + "For more information, see: https://minikube.sigs.k8s.io/docs/reference/drivers/none/": "", + "Force environment to be configured for a specified shell: [fish, cmd, powershell, tcsh, bash, zsh], default is auto-detect": "", + "Force minikube to perform possibly dangerous operations": "", + "Found network options:": "", + "Found {{.number}} invalid profile(s) !": "", + "Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "", + "Generate unable to parse memory '{{.memory}}': {{.error}}": "", + "Gets the kubernetes URL(s) for the specified service in your local cluster": "", + "Gets the kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time.": "", + "Gets the logs of the running instance, used for debugging minikube, not user code.": "", + "Gets the status of a local kubernetes cluster": "", + "Gets the status of a local kubernetes cluster.\n\tExit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left.\n\tEg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK)": "", + "Gets the value of PROPERTY_NAME from the minikube config file": "", + "Global Flags": "", + "Go template format string for the cache list output. 
The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate": "", + "Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "", + "Go template format string for the status output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#Status": "", + "Group ID: {{.groupID}}": "", + "Have you set up libvirt correctly?": "", + "Hide the hypervisor signature from the guest in minikube (kvm2 driver only)": "", + "Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", + "Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", + "If set, automatically updates drivers to the latest version. Defaults to true.": "", + "If set, delete the current cluster if start fails and try again. Defaults to false.": "", + "If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "", + "If set, install addons. Defaults to true.": "", + "If set, pause all namespaces": "", + "If set, unpause all namespaces": "", + "If the above advice does not help, please let us know:": "", + "If true, cache docker images for the current bootstrapper and load them into the machine. 
Always false with --driver=none.": "", + "If true, only download and cache files for later use - don't install or start anything.": "", + "If true, the added node will be marked for work. Defaults to true.": "", + "If true, the node added will also be a control plane in addition to a worker.": "", + "If using the none driver, ensure that systemctl is installed": "", + "If you are running minikube within a VM, consider using --driver=none:": "", + "Images Commands:": "", + "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.": "", + "Install VirtualBox, or select an alternative value for --driver": "", + "Install the latest hyperkit binary, and run 'minikube delete'": "", + "Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "", + "Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "", + "Kill the mount process spawned by minikube start": "", + "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}": "", + "Kubernetes {{.version}} is not supported by this release of minikube": "", + "Launching proxy ...": "", + "List all available images from the local cache.": "", + "List of guest VSock ports that should be exposed as sockets on the host (hyperkit driver only)": "", + "Lists all available minikube addons as well as their current statuses (enabled/disabled)": "", + "Lists all minikube profiles.": "", + "Lists all valid minikube profiles and detects all possible invalid profiles.": "", + "Lists the URLs for the services in your local cluster": "", + "Local folders to share with Guest via NFS mounts (hyperkit driver only)": "", + "Location of the VPNKit socket used for networking. 
If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "", + "Locations to fetch the minikube ISO from.": "", + "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "", + "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "", + "Message Size: {{.size}}": "", + "Minikube is a CLI tool that provisions and manages single-node Kubernetes clusters optimized for development workflows.": "", + "Minikube is a tool for managing local Kubernetes clusters.": "", + "Modify minikube config": "", + "Modify minikube's kubernetes addons": "", + "Most users should use the newer 'docker' driver instead, which does not require root!": "", + "Mount type: {{.name}}": "", + "Mounting host path {{.sourcePath}} into VM as {{.destinationPath}} ...": "", + "Mounts the specified directory into minikube": "", + "Mounts the specified directory into minikube.": "", + "Multiple errors deleting profiles": "", + "Multiple minikube profiles were found -": "", + "NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)": "", + "NOTE: This process must stay alive for the mount to be accessible ...": "", + "Networking and Connectivity Commands:": "", + "No changes required for the \"{{.context}}\" context": "", + "No minikube profile was found. 
You can create one using `minikube start`.": "", + "Node \"{{.node_name}}\" stopped.": "", + "Node operations": "", + "Node {{.name}} failed to start, deleting and trying again.": "", + "Node {{.name}} was successfully deleted.": "", + "Node {{.nodeName}} does not exist.": "", + "Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "", + "None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "", + "None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag": "", + "Not passing {{.name}}={{.value}} to docker env.": "", + "Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "", + "Number of CPUs allocated to Kubernetes.": "", + "Number of lines back to go within the log": "", + "OS release is {{.pretty_name}}": "", + "One of 'yaml' or 'json'.": "", + "Only alphanumeric, dots, underscores and dashes '-' are permitted. Minimum 2 characters, starting by alphanumeric.": "", + "Open the addons URL with https instead of http": "", + "Open the service URL with https instead of http": "", + "Opening kubernetes service {{.namespace_name}}/{{.service_name}} in default browser...": "", + "Opening service {{.namespace_name}}/{{.service_name}} in default browser...": "", + "Opening {{.url}} in your default browser...": "", + "Opens the addon w/ADDON_NAME within minikube (example: minikube addons open dashboard). 
For a list of available addons use: minikube addons list": "", + "Operations on nodes": "", + "Options: {{.options}}": "", + "Outputs minikube shell completion for the given shell (bash or zsh)": "", + "Outputs minikube shell completion for the given shell (bash or zsh)\n\n\tThis depends on the bash-completion binary. Example installation instructions:\n\tOS X:\n\t\t$ brew install bash-completion\n\t\t$ source $(brew --prefix)/etc/bash_completion\n\t\t$ minikube completion bash \u003e ~/.minikube-completion # for bash users\n\t\t$ minikube completion zsh \u003e ~/.minikube-completion # for zsh users\n\t\t$ source ~/.minikube-completion\n\tUbuntu:\n\t\t$ apt-get install bash-completion\n\t\t$ source /etc/bash-completion\n\t\t$ source \u003c(minikube completion bash) # for bash users\n\t\t$ source \u003c(minikube completion zsh) # for zsh users\n\n\tAdditionally, you may want to output the completion to a file and source in your .bashrc\n\n\tNote for zsh users: [1] zsh completions are only supported in versions of zsh \u003e= 5.2": "", + "Pause": "", + "Paused kubelet and {{.count}} containers": "", + "Paused kubelet and {{.count}} containers in: {{.namespaces}}": "", + "Permissions: {{.octalMode}} ({{.writtenMode}})": "", + "Please enter a value:": "", + "Please install the minikube hyperkit VM driver, or select an alternative --driver": "", + "Please install the minikube kvm2 VM driver, or select an alternative --driver": "", + "Please make sure the service you are looking for is deployed or is in the correct namespace.": "", + "Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "", + "Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "", + "Populates the specified folder with documentation in markdown about minikube": "", + "Powering off \"{{.profile_name}}\" via SSH 
...": "", + "Preparing Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}} ...": "", + "Print current and latest version number": "", + "Print just the version number.": "", + "Print the version of minikube": "", + "Print the version of minikube.": "", + "Problems detected in {{.entry}}:": "", + "Problems detected in {{.name}}:": "", + "Profile gets or sets the current minikube profile": "", + "Profile name \"{{.profilename}}\" is reserved keyword. To delete this profile, run: \"{{.cmd}}\"": "", + "Profile name '{{.name}}' is not valid": "", + "Profile name '{{.profilename}}' is not valid": "", + "Provide VM UUID to restore MAC address (hyperkit driver only)": "", + "Pulling base image ...": "", + "Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "", + "Rebuild libvirt with virt-network support": "", + "Received {{.name}} signal": "", + "Registry mirrors to pass to the Docker daemon": "", + "Reinstall VirtualBox and reboot. Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/": "", + "Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "", + "Related issue: {{.url}}": "", + "Related issues:": "", + "Remove the incompatible --docker-opt flag if one was provided": "", + "Removed all traces of the \"{{.name}}\" cluster.": "", + "Removing {{.directory}} ...": "", + "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "", + "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "", + "Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. 
Kubernetes may crash unexpectedly.": "", + "Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "", + "Restart Docker": "", + "Restarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "", + "Restarting the {{.name}} service may improve performance.": "", + "Retrieve the ssh identity key path of the specified cluster": "", + "Retrieve the ssh identity key path of the specified cluster.": "", + "Retrieves the IP address of the running cluster": "", + "Retrieves the IP address of the running cluster, and writes it to STDOUT.": "", + "Retrieves the IP address of the running cluster, checks it\n\t\t\twith IP in kubeconfig, and corrects kubeconfig if incorrect.": "", + "Returns the value of PROPERTY_NAME from the minikube config file. Can be overwritten at runtime by flags or environmental variables.": "", + "Right-click the PowerShell icon and select Run as Administrator to open PowerShell in elevated mode.": "", + "Run 'kubectl describe pod coredns -n kube-system' and check for a firewall or DNS conflict": "", + "Run 'minikube delete' to delete the stale VM, or and ensure that minikube is running as the same user you are issuing this command with": "", + "Run 'sudo sysctl fs.protected_regular=1', or try a driver which does not require root, such as '--driver=docker'": "", + "Run kubectl": "", + "Run minikube from the C: drive.": "", + "Run the kubernetes client, download it if necessary. 
Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "", + "Run: 'chmod 600 $HOME/.kube/config'": "", + "Run: 'kubectl delete clusterrolebinding kubernetes-dashboard'": "", + "Run: 'sudo mkdir /sys/fs/cgroup/systemd \u0026\u0026 sudo mount -t cgroup -o none,name=systemd cgroup /sys/fs/cgroup/systemd'": "", + "Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", + "Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. Or list out all the services using 'minikube service list'": "", + "Set failed": "", + "Set flag to delete all profiles": "", + "Set this flag to delete the '.minikube' folder from your user directory.": "", + "Sets an individual value in a minikube config file": "", + "Sets the PROPERTY_NAME config value to PROPERTY_VALUE\n\tThese values can be overwritten by flags or environment variables at runtime.": "", + "Sets up docker env variables; similar to '$(docker-machine env)'": "", + "Sets up docker env variables; similar to '$(docker-machine env)'.": "", + "Sets up podman env variables; similar to '$(podman-machine env)'": "", + "Sets up podman env variables; similar to '$(podman-machine env)'.": "", + "Setting profile failed": "", + "Show a list of global command-line options (applies to all commands).": "", + "Show only log entries which point to known problems": "", + "Show only the most recent journal entries, and continuously print new entries as they are appended to the journal.": "", + "Skipped switching kubectl context for {{.profile_name}} because --keep-context was set.": "", + "Sorry, Kubernetes v{{.k8sVersion}} requires conntrack to be installed in root's path": "", + "Sorry, Kubernetes {{.version}} is not supported by this release of minikube": "", + "Sorry, completion support is not yet implemented for 
{{.name}}": "", + "Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config": "", + "Sorry, the url provided with the --registry-mirror flag is invalid: {{.url}}": "", + "Specified Kubernetes version {{.specified}} is less than the oldest supported version: {{.oldest}}": "", + "Specify --kubernetes-version in v\u003cmajor\u003e.\u003cminor.\u003cbuild\u003e form. example: 'v1.1.14'": "", + "Specify an alternate --host-only-cidr value, such as 172.16.0.1/24": "", + "Specify arbitrary flags to pass to the Docker daemon. (format: key=value)": "", + "Specify the 9p version that the mount should use": "", + "Specify the ip that the mount should be setup on": "", + "Specify the mount filesystem type (supported types: 9p)": "", + "StartHost failed, but will try again: {{.error}}": "", + "Starting control plane node {{.name}} in cluster {{.cluster}}": "", + "Starting node {{.name}} in cluster {{.cluster}}": "", + "Starting tunnel for service {{.service}}.": "", + "Starts a local kubernetes cluster": "", + "Starts a node.": "", + "Starts an existing stopped node in a cluster.": "", + "Startup with {{.old_driver}} driver failed, trying with alternate driver {{.new_driver}}: {{.error}}": "", + "Stopping \"{{.profile_name}}\" in {{.driver_name}} ...": "", + "Stopping tunnel for service {{.service}}.": "", + "Stops a local kubernetes cluster running in Virtualbox. This command stops the VM\nitself, leaving all files intact. 
The cluster can be started again with the \"start\" command.": "", + "Stops a node in a cluster.": "", + "Stops a running local kubernetes cluster": "", + "Successfully added {{.name}} to {{.cluster}}!": "", + "Successfully deleted all profiles": "", + "Successfully mounted {{.sourcePath}} to {{.destinationPath}}": "", + "Successfully purged minikube directory located at - [{{.minikubeDirectory}}]": "", + "Suggestion: {{.advice}}": "", + "Suggestion: {{.fix}}": "", + "Target directory {{.path}} must be an absolute path": "", + "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube start --driver={{.driver_name}}'.": "", + "The \"{{.driver_name}}\" driver should not be used with root privileges.": "", + "The 'none' driver is designed for experts who need to integrate with an existing VM": "", + "The '{{.addonName}}' addon is enabled": "", + "The '{{.driver}}' driver requires elevated permissions. The following commands will be executed:\\n\\n{{ .example }}\\n": "", + "The '{{.name}} driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/": "", + "The '{{.name}}' driver does not respect the --cpus flag": "", + "The '{{.name}}' driver does not respect the --memory flag": "", + "The CIDR to be used for service cluster IPs.": "", + "The CIDR to be used for the minikube VM (virtualbox driver only)": "", + "The KVM QEMU connection URI. (kvm2 driver only)": "", + "The KVM driver is unable to resurrect this old VM. Please run `minikube delete` to delete it and try again.": "", + "The KVM network name. (kvm2 driver only)": "", + "The VM driver crashed. Run 'minikube start --alsologtostderr -v=8' to see the VM driver error message": "", + "The VM driver exited with an error, and may be corrupt. Run 'minikube start' with --alsologtostderr -v=8 to see the error": "", + "The VM that minikube is configured for no longer exists. 
Run 'minikube delete'": "", + "The apiserver listening port": "", + "The argument to pass the minikube mount command on start.": "", + "The authoritative apiserver hostname for apiserver certificates and connectivity. This can be used if you want to make the apiserver available from outside the machine": "", + "The cluster dns domain name used in the kubernetes cluster": "", + "The container runtime to be used (docker, crio, containerd).": "", + "The control plane for \"{{.name}}\" is paused!": "", + "The control plane node \"{{.name}}\" does not exist.": "", + "The control plane node is not running (state={{.state}})": "", + "The control plane node must be running for this command": "", + "The cri socket path to be used.": "", + "The docker service within '{{.name}}' is not active": "", + "The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "", + "The driver '{{.driver}}' is not supported on {{.os}}": "", + "The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "", + "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "", + "The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "", + "The initial time interval for each check that wait performs in seconds": "", + "The machine-driver specified is failing to start. Try running 'docker-machine-driver-\u003ctype\u003e version'": "", + "The minikube VM is offline. Please run 'minikube start' to start it again.": "", + "The name of the network plugin.": "", + "The name of the node to delete": "", + "The name of the node to start": "", + "The node to get logs from. Defaults to the primary control plane.": "", + "The node to ssh into. 
Defaults to the primary control plane.": "", + "The none driver is not compatible with multi-node clusters.": "", + "The number of bytes to use for 9p packet payload": "", + "The number of nodes to spin up. Defaults to 1.": "", + "The output format. One of 'json', 'table'": "", + "The path on the file system where the docs in markdown need to be saved": "", + "The podman service within '{{.cluster}}' is not active": "", + "The service namespace": "", + "The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "", + "The services namespace": "", + "The time interval for each check that wait performs in seconds": "", + "The value passed to --format is invalid": "", + "The value passed to --format is invalid: {{.error}}": "", + "The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "", + "There is no local cluster named \"{{.cluster}}\"": "", + "These changes will take effect upon a minikube delete and then a minikube start": "", + "This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "", + "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "", + "This control plane is not running! (state={{.state}})": "", + "This driver does not yet work on your architecture. 
Maybe try --driver=none": "", + "This is unusual - you may want to investigate using \"{{.command}}\"": "", + "This will keep the existing kubectl context and will create a minikube context.": "", + "This will start the mount daemon and automatically mount files into minikube.": "", + "This {{.type}} is having trouble accessing https://{{.repository}}": "", + "Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "", + "To connect to this cluster, use: kubectl --context={{.name}}": "", + "To connect to this cluster, use: kubectl --context={{.profile_name}}": "", + "To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "", + "To fix this, run: \"{{.command}}\"": "", + "To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "", + "To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/": "", + "To see addons list for other profiles use: `minikube addons -p name list`": "", + "To start minikube with Hyper-V, PowerShell must be in your PATH": "", + "To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "", + "Troubleshooting Commands:": "", + "Try 'minikube delete' to force new SSL certificates to be installed": "", + "Try 'minikube delete', and disable any conflicting VPN or firewall software": "", + "Try specifying a --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", + "Trying to delete invalid profile {{.profile}}": "", + "Unable to bind flags": "", + "Unable to enable dashboard": "", + "Unable to fetch latest version info": "", + "Unable to find control plane": "", + "Unable to generate docs": "", + "Unable to generate the documentation. 
Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", + "Unable to get command runner": "", + "Unable to get control plane status: {{.error}}": "", + "Unable to get current user": "", + "Unable to get forwarded endpoint": "", + "Unable to get machine status": "", + "Unable to get runtime": "", + "Unable to kill mount process: {{.error}}": "", + "Unable to load cached images: {{.error}}": "", + "Unable to load config: {{.error}}": "", + "Unable to load host": "", + "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "", + "Unable to parse default Kubernetes version from constants: {{.error}}": "", + "Unable to parse memory '{{.memory}}': {{.error}}": "", + "Unable to parse oldest Kubernetes version from constants: {{.error}}": "", + "Unable to pick a default driver. Here is what was considered, in preference order:": "", + "Unable to push cached images: {{.error}}": "", + "Unable to remove machine directory": "", + "Unable to restart cluster, will reset it: {{.error}}": "", + "Unable to stop VM": "", + "Unable to update {{.driver}} driver: {{.error}}": "", + "Unable to verify SSH connectivity: {{.error}}. 
Will retry...": "", + "Uninstalling Kubernetes {{.kubernetes_version}} using {{.bootstrapper_name}} ...": "", + "Unmounting {{.path}} ...": "", + "Unpaused kubelet and {{.count}} containers": "", + "Unpaused kubelet and {{.count}} containers in: {{.namespaces}}": "", + "Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "", + "Unset variables instead of setting them": "", + "Update server returned an empty list": "", + "Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "", + "Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "", + "Usage": "", + "Usage: minikube completion SHELL": "", + "Usage: minikube delete": "", + "Usage: minikube delete --all --purge": "", + "Usage: minikube node [add|start|stop|delete]": "", + "Usage: minikube node delete [name]": "", + "Usage: minikube node start [name]": "", + "Usage: minikube node stop [name]": "", + "Use \"{{.CommandPath}} [command] --help\" for more information about a command.": "", + "Use 'kubectl get po -A' to find the correct namespace and name": "", + "Use -A to specify all namespaces": "", + "Use VirtualBox to remove the conflicting VM and/or network interfaces": "", + "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. 
Useful for the machine drivers when they will not start with 'Waiting for SSH'.": "", + "User ID: {{.userID}}": "", + "Userspace file server is shutdown": "", + "Userspace file server:": "", + "Using image repository {{.name}}": "", + "Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "", + "Using the {{.driver}} driver based on existing profile": "", + "Using the {{.driver}} driver based on user configuration": "", + "Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "", + "Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "", + "Verify the IP address of the running cluster in kubeconfig.": "", + "Verifying dashboard health ...": "", + "Verifying proxy health ...": "", + "Version: {{.version}}": "", + "VirtualBox and Hyper-V are having a conflict. Use '--driver=hyperv' or disable Hyper-V using: 'bcdedit /set hypervisorlaunchtype off'": "", + "VirtualBox cannot create a network, probably because it conflicts with an existing network that minikube no longer knows about. Try running 'minikube delete'": "", + "VirtualBox is broken. Disable real-time anti-virus software, reboot, and reinstall VirtualBox if the problem continues.": "", + "VirtualBox is broken. Reinstall VirtualBox, reboot, and run 'minikube delete'.": "", + "VirtualBox is unable to find its network interface. Try upgrading to the latest release and rebooting.": "", + "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=docker'. Otherwise, consult your systems BIOS manual for how to enable virtualization.": "", + "Wait failed: {{.error}}": "", + "Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "", + "Whether to use external switch over Default Switch if virtual switch not explicitly specified. 
(hyperv driver only)": "", + "You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "", + "You can also use 'minikube kubectl -- get pods' to invoke a matching version": "", + "You can delete them using the following command(s):": "", + "You cannot change the CPUs for an existing minikube cluster. Please first delete the cluster.": "", + "You cannot change the Disk size for an existing minikube cluster. Please first delete the cluster.": "", + "You cannot change the memory size for an existing minikube cluster. Please first delete the cluster.": "", + "You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "", + "You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "", + "You may need to stop the Hyper-V Manager and run `minikube delete` again.": "", + "You must specify a service name": "", + "Your host does not support KVM virtualization. Ensure that qemu-kvm is installed, and run 'virt-host-validate' to debug the problem": "", + "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. Otherwise, enable virtualization in your BIOS": "", + "Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "", + "Your minikube config refers to an unsupported driver. 
Erase ~/.minikube, and try again.": "", + "Your minikube vm is not running, try minikube start.": "", + "[{{.id}}] {{.msg}} {{.error}}": "", + "addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "", + "addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "", + "addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "", + "bash completion failed": "", + "call with cleanup=true to remove old tunnels": "", + "config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "", + "config view failed": "", + "dashboard service is not running: {{.error}}": "", + "deleting node": "", + "disable failed": "", + "dry-run mode. Validates configuration, but does not mutate system state": "", + "dry-run validation complete!": "", + "enable failed": "", + "error creating clientset": "", + "error getting primary control plane": "", + "error getting ssh port": "", + "error parsing the input ip address for mount": "", + "error provisioning host": "", + "error starting tunnel": "", + "error stopping tunnel": "", + "error: --output must be 'yaml' or 'json'": "", + "experimental": "", + "failed to add node": "", + "failed to open browser: {{.error}}": "", + "failed to start node": "", + "if true, will embed the certs in kubeconfig.": "", + "if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "", + "initialization failed, will try again: {{.error}}": "", + "kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. 
Run lsof -p\u003cport\u003e to find the process and kill it": "", + "kubectl and minikube configuration will be stored in {{.home_folder}}": "", + "kubectl proxy": "", + "libmachine failed": "", + "logdir set failed": "", + "max time to wait per Kubernetes core services to be healthy.": "", + "minikube addons list --output OUTPUT. json, list": "", + "minikube is exiting due to an error. If the above message is not useful, open an issue:": "", + "minikube is not yet compatible with ChromeOS": "", + "minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.": "", + "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "", + "minikube profile was successfully set to {{.profile_name}}": "", + "minikube status --output OUTPUT. json, text": "", + "minikube {{.version}} is available! Download it: {{.url}}": "", + "mkcmp is used to compare performance of two minikube binaries": "", + "mount argument \"{{.value}}\" must be in form: \u003csource directory\u003e:\u003ctarget directory\u003e": "", + "mount failed": "", + "namespaces to pause": "", + "namespaces to unpause": "", + "none driver does not support multi-node clusters": "", + "not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "", + "pause containers": "", + "profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. 
You can return to the default minikube profile by running `minikube profile default`": "", + "provisioning host for node": "", + "reload cached images.": "", + "reloads images previously added using the 'cache add' subcommand": "", + "retrieving node": "", + "service {{.namespace_name}}/{{.service_name}} has no node port": "", + "stat failed": "", + "status json failure": "", + "status text failure": "", + "too many arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "", + "tunnel creates a route to services deployed with type LoadBalancer and sets their Ingress to their ClusterIP. for a detailed example see https://minikube.sigs.k8s.io/docs/tasks/loadbalancer": "", + "tunnel makes services of type LoadBalancer accessible on localhost": "", + "unable to bind flags": "", + "unable to delete minikube config folder": "", + "unable to set logtostderr": "", + "unpause Kubernetes": "", + "unset failed": "", + "unsets PROPERTY_NAME from the minikube config file. Can be overwritten by flags or environmental variables": "", + "unsets an individual value in a minikube config file": "", + "unsupported or missing driver: {{.name}}": "", + "update config": "", + "usage: minikube addons configure ADDON_NAME": "", + "usage: minikube addons disable ADDON_NAME": "", + "usage: minikube addons enable ADDON_NAME": "", + "usage: minikube addons list": "", + "usage: minikube addons open ADDON_NAME": "", + "usage: minikube config unset PROPERTY_NAME": "", + "usage: minikube delete": "", + "usage: minikube profile [MINIKUBE_PROFILE_NAME]": "", + "version json failure": "", + "version yaml failure": "", + "zsh completion failed": "", + "{{ .name }}: {{ .rejection }}": "", + "{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "", + "{{.driver}} does not appear to be installed": "", + "{{.driver}} does not appear to be installed, but is specified by an existing profile. 
Please run 'minikube delete' or install {{.driver}}": "", + "{{.extra_option_component_name}}.{{.key}}={{.value}}": "", + "{{.name}} has no available configuration options": "", + "{{.name}} is already running": "", + "{{.name}} was successfully configured": "", + "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "", + "{{.prefix}}minikube {{.version}} on {{.platform}}": "", + "{{.type}} is not yet a supported filesystem. We will try anyways!": "", + "{{.url}} is not accessible: {{.error}}": "" +} \ No newline at end of file