Merge branch 'master' into k8s-v1.18.1

pull/7714/head
Thomas Stromberg 2020-04-21 15:25:58 -07:00
commit e185606d94
137 changed files with 3706 additions and 986 deletions

537
.github/workflows/master.yml vendored Normal file
View File

@ -0,0 +1,537 @@
# GitHub Actions workflow: CI for pushes to master that touch Go files.
# build_minikube compiles the minikube/e2e binaries once; the integration
# jobs then run in parallel, each downloading those binaries as an artifact.
name: MasterCI
on:
  push:
    branches:
      - master
    paths:
      - '**.go'
env:
  GOPROXY: https://proxy.golang.org
jobs:
  # Runs before all other jobs
  # builds the minikube binaries
  build_minikube:
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
      - name: Download Dependencies
        run: go mod download
      - name: Build Binaries
        run: |
          make minikube-linux-amd64
          make e2e-linux-amd64
          cp -r test/integration/testdata ./out
          whoami
          echo github ref $GITHUB_REF
          echo workflow $GITHUB_WORKFLOW
          echo home $HOME
          echo event name $GITHUB_EVENT_NAME
          echo workspace $GITHUB_WORKSPACE
          echo "end of debug stuff"
          echo $(which jq)
      - uses: actions/upload-artifact@v1
        with:
          name: minikube_binaries
          path: out
  lint:
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
      - name: Install libvirt
        # libvirt-dev is required so the KVM driver code compiles during lint
        run: |
          sudo apt-get update
          sudo apt-get install -y libvirt-dev
      - name: Download Dependencies
        run: go mod download
      - name: Lint
        env:
          TESTSUITE: lintall
        run: make test
        continue-on-error: false
  unit_test:
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
      - name: Install libvirt
        run: |
          sudo apt-get update
          sudo apt-get install -y libvirt-dev
      - name: Download Dependencies
        run: go mod download
      - name: Unit Test
        env:
          TESTSUITE: unittest
        run: make test
        continue-on-error: false
  # Run the following integration tests after the build_minikube
  # They will run in parallel and use the binaries in previous step
  functional_test_docker_ubuntu:
    needs: [build_minikube]
    env:
      TIME_ELAPSED: time
      JOB_NAME: "functional_test_docker_ubuntu"
      GOPOGH_RESULT: ""
      SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
    runs-on: ubuntu-18.04
    steps:
      - name: Install kubectl
        shell: bash
        run: |
          curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
          sudo install kubectl /usr/local/bin/kubectl
          kubectl version --client=true
      - name: Docker Info
        shell: bash
        run: |
          echo "--------------------------"
          docker version || true
          echo "--------------------------"
          docker info || true
          echo "--------------------------"
          docker system df || true
          echo "--------------------------"
          docker system info || true
          echo "--------------------------"
          docker ps || true
          echo "--------------------------"
      - name: Install lz4
        shell: bash
        run: |
          sudo apt-get update -qq
          sudo apt-get -qq -y install liblz4-tool
      - name: Install gopogh
        shell: bash
        run: |
          curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
          sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
      - name: Download Binaries
        uses: actions/download-artifact@v1
        with:
          name: minikube_binaries
      - name: Run Integration Test
        continue-on-error: false
        # bash {0} to allow the test to continue to the next step, in case of failure
        shell: bash {0}
        run: |
          cd minikube_binaries
          mkdir -p report
          mkdir -p testhome
          chmod a+x e2e-*
          chmod a+x minikube-*
          sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
          sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
          START_TIME=$(date -u +%s)
          KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.run TestFunctional -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
          END_TIME=$(date -u +%s)
          TIME_ELAPSED=$(($END_TIME-$START_TIME))
          min=$((${TIME_ELAPSED}/60))
          sec=$((${TIME_ELAPSED}%60))
          TIME_ELAPSED="${min} min $sec seconds "
          # NOTE(review): the ::set-env workflow command is deprecated and is
          # disabled on current GitHub runners; migrate to
          # `echo "VAR=value" >> $GITHUB_ENV` when updating this workflow.
          echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
      - name: Generate HTML Report
        shell: bash
        run: |
          cd minikube_binaries
          export PATH=${PATH}:`go env GOPATH`/bin
          go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
          STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
          echo status: ${STAT}
          FailNum=$(echo $STAT | jq '.NumberOfFail')
          TestsNum=$(echo $STAT | jq '.NumberOfTests')
          GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
          echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
          echo ::set-env name=STAT::${STAT}
      - uses: actions/upload-artifact@v1
        with:
          name: functional_test_docker_ubuntu
          path: minikube_binaries/report
      - name: The End Result functional_test_docker_ubuntu
        shell: bash
        run: |
          echo ${GOPOGH_RESULT}
          numFail=$(echo $STAT | jq '.NumberOfFail')
          echo "----------------${numFail} Failures----------------------------"
          echo $STAT | jq '.FailedTests' || true
          echo "-------------------------------------------------------"
          numPass=$(echo $STAT | jq '.NumberOfPass')
          echo "*** $numPass Passed ***"
          if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
  addons_certs_tests_docker_ubuntu:
    runs-on: ubuntu-18.04
    env:
      TIME_ELAPSED: time
      JOB_NAME: "addons_certs_tests_docker_ubuntu"
      GOPOGH_RESULT: ""
      SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
    needs: [build_minikube]
    steps:
      - name: Install kubectl
        shell: bash
        run: |
          curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
          sudo install kubectl /usr/local/bin/kubectl
          kubectl version --client=true
      - name: Install lz4
        shell: bash
        run: |
          sudo apt-get update -qq
          sudo apt-get -qq -y install liblz4-tool
      - name: Docker Info
        shell: bash
        run: |
          echo "--------------------------"
          docker version || true
          echo "--------------------------"
          docker info || true
          echo "--------------------------"
          docker system df || true
          echo "--------------------------"
          docker system info || true
          echo "--------------------------"
          docker ps || true
          echo "--------------------------"
      - name: Install gopogh
        shell: bash
        run: |
          curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
          sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
      - name: Download Binaries
        uses: actions/download-artifact@v1
        with:
          name: minikube_binaries
      - name: Run Integration Test
        continue-on-error: true
        # bash {0} to allow the test to continue to the next step, in case of failure
        shell: bash {0}
        run: |
          cd minikube_binaries
          mkdir -p report
          mkdir -p testhome
          chmod a+x e2e-*
          chmod a+x minikube-*
          sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
          sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
          START_TIME=$(date -u +%s)
          KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestAddons|TestCertOptions)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
          END_TIME=$(date -u +%s)
          TIME_ELAPSED=$(($END_TIME-$START_TIME))
          min=$((${TIME_ELAPSED}/60))
          sec=$((${TIME_ELAPSED}%60))
          TIME_ELAPSED="${min} min $sec seconds "
          echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
      - name: Generate HTML Report
        shell: bash
        run: |
          cd minikube_binaries
          export PATH=${PATH}:`go env GOPATH`/bin
          go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
          STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
          echo status: ${STAT}
          FailNum=$(echo $STAT | jq '.NumberOfFail')
          TestsNum=$(echo $STAT | jq '.NumberOfTests')
          GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
          echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
          echo ::set-env name=STAT::${STAT}
      - uses: actions/upload-artifact@v1
        with:
          name: addons_certs_tests_docker_ubuntu
          path: minikube_binaries/report
      - name: The End Result - addons_certs_tests_docker_ubuntu
        shell: bash
        run: |
          echo ${GOPOGH_RESULT}
          numFail=$(echo $STAT | jq '.NumberOfFail')
          echo "----------------${numFail} Failures----------------------------"
          echo $STAT | jq '.FailedTests' || true
          echo "-------------------------------------------------------"
          numPass=$(echo $STAT | jq '.NumberOfPass')
          echo "*** $numPass Passed ***"
          if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
  multinode_pause_tests_docker_ubuntu:
    runs-on: ubuntu-18.04
    env:
      TIME_ELAPSED: time
      JOB_NAME: "multinode_pause_tests_docker_ubuntu"
      GOPOGH_RESULT: ""
      SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
    needs: [build_minikube]
    steps:
      - name: Install kubectl
        shell: bash
        run: |
          curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
          sudo install kubectl /usr/local/bin/kubectl
          kubectl version --client=true
      - name: Install lz4
        shell: bash
        run: |
          sudo apt-get update -qq
          sudo apt-get -qq -y install liblz4-tool
      - name: Docker Info
        shell: bash
        run: |
          echo "--------------------------"
          docker version || true
          echo "--------------------------"
          docker info || true
          echo "--------------------------"
          docker system df || true
          echo "--------------------------"
          docker system info || true
          echo "--------------------------"
          docker ps || true
          echo "--------------------------"
      - name: Install gopogh
        shell: bash
        run: |
          curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
          sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
      - name: Download Binaries
        uses: actions/download-artifact@v1
        with:
          name: minikube_binaries
      - name: Run Integration Test
        continue-on-error: true
        # bash {0} to allow the test to continue to the next step, in case of failure
        shell: bash {0}
        run: |
          cd minikube_binaries
          mkdir -p report
          mkdir -p testhome
          chmod a+x e2e-*
          chmod a+x minikube-*
          sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
          sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
          START_TIME=$(date -u +%s)
          KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestPause|TestMultiNode)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
          END_TIME=$(date -u +%s)
          TIME_ELAPSED=$(($END_TIME-$START_TIME))
          min=$((${TIME_ELAPSED}/60))
          sec=$((${TIME_ELAPSED}%60))
          TIME_ELAPSED="${min} min $sec seconds "
          echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
      - name: Generate HTML Report
        shell: bash
        run: |
          cd minikube_binaries
          export PATH=${PATH}:`go env GOPATH`/bin
          go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
          STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
          echo status: ${STAT}
          FailNum=$(echo $STAT | jq '.NumberOfFail')
          TestsNum=$(echo $STAT | jq '.NumberOfTests')
          GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
          echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
          echo ::set-env name=STAT::${STAT}
      - uses: actions/upload-artifact@v1
        with:
          name: multinode_pause_tests_docker_ubuntu
          path: minikube_binaries/report
      - name: The End Result - multinode_pause_tests_docker_ubuntu
        shell: bash
        run: |
          echo ${GOPOGH_RESULT}
          numFail=$(echo $STAT | jq '.NumberOfFail')
          echo "----------------${numFail} Failures----------------------------"
          echo $STAT | jq '.FailedTests' || true
          echo "-------------------------------------------------------"
          numPass=$(echo $STAT | jq '.NumberOfPass')
          echo "*** $numPass Passed ***"
          if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
  preload_docker_flags_tests_docker_ubuntu:
    runs-on: ubuntu-18.04
    env:
      TIME_ELAPSED: time
      JOB_NAME: "preload_docker_flags_tests_docker_ubuntu"
      GOPOGH_RESULT: ""
      SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
    needs: [build_minikube]
    steps:
      - name: Install kubectl
        shell: bash
        run: |
          curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
          sudo install kubectl /usr/local/bin/kubectl
          kubectl version --client=true
      - name: Install lz4
        shell: bash
        run: |
          sudo apt-get update -qq
          sudo apt-get -qq -y install liblz4-tool
      - name: Docker Info
        shell: bash
        run: |
          echo "--------------------------"
          docker version || true
          echo "--------------------------"
          docker info || true
          echo "--------------------------"
          docker system df || true
          echo "--------------------------"
          docker system info || true
          echo "--------------------------"
          docker ps || true
          echo "--------------------------"
      - name: Install gopogh
        shell: bash
        run: |
          curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
          sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
      - name: Download Binaries
        uses: actions/download-artifact@v1
        with:
          name: minikube_binaries
      - name: Run Integration Test
        continue-on-error: true
        # bash {0} to allow the test to continue to the next step, in case of failure
        shell: bash {0}
        run: |
          cd minikube_binaries
          mkdir -p report
          mkdir -p testhome
          chmod a+x e2e-*
          chmod a+x minikube-*
          sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
          sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
          START_TIME=$(date -u +%s)
          KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestPreload|TestDockerFlags)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
          END_TIME=$(date -u +%s)
          TIME_ELAPSED=$(($END_TIME-$START_TIME))
          min=$((${TIME_ELAPSED}/60))
          sec=$((${TIME_ELAPSED}%60))
          TIME_ELAPSED="${min} min $sec seconds "
          echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
      - name: Generate HTML Report
        shell: bash
        run: |
          cd minikube_binaries
          export PATH=${PATH}:`go env GOPATH`/bin
          go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
          STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
          echo status: ${STAT}
          FailNum=$(echo $STAT | jq '.NumberOfFail')
          TestsNum=$(echo $STAT | jq '.NumberOfTests')
          GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
          echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
          echo ::set-env name=STAT::${STAT}
      - uses: actions/upload-artifact@v1
        with:
          name: preload_docker_flags_tests_docker_ubuntu
          path: minikube_binaries/report
      - name: The End Result - preload_docker_flags_tests_docker_ubuntu
        shell: bash
        run: |
          echo ${GOPOGH_RESULT}
          numFail=$(echo $STAT | jq '.NumberOfFail')
          echo "----------------${numFail} Failures----------------------------"
          echo $STAT | jq '.FailedTests' || true
          echo "-------------------------------------------------------"
          numPass=$(echo $STAT | jq '.NumberOfPass')
          echo "*** $numPass Passed ***"
          if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
  functional_baremetal_ubuntu18_04:
    needs: [build_minikube]
    env:
      TIME_ELAPSED: time
      JOB_NAME: "functional_baremetal_ubuntu18_04"
      GOPOGH_RESULT: ""
      SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
    runs-on: ubuntu-18.04
    steps:
      - name: Install kubectl
        shell: bash
        run: |
          curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
          sudo install kubectl /usr/local/bin/kubectl
          kubectl version --client=true
      # conntrack is required for kubernetes 1.18 and higher
      # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon
      - name: Install tools for none
        shell: bash
        run: |
          sudo apt-get update -qq
          sudo apt-get -qq -y install conntrack
          sudo apt-get -qq -y install socat
          VERSION="v1.17.0"
          curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz
          sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin
      - name: Install gopogh
        shell: bash
        run: |
          curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
          sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
      - name: Download Binaries
        uses: actions/download-artifact@v1
        with:
          name: minikube_binaries
      - name: Run Integration Test
        continue-on-error: true
        # bash {0} to allow the test to continue to the next step, in case of failure
        shell: bash {0}
        run: |
          cd minikube_binaries
          mkdir -p report
          mkdir -p testhome
          chmod a+x e2e-*
          chmod a+x minikube-*
          START_TIME=$(date -u +%s)
          KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=35m -test.run TestFunctional -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
          END_TIME=$(date -u +%s)
          TIME_ELAPSED=$(($END_TIME-$START_TIME))
          min=$((${TIME_ELAPSED}/60))
          sec=$((${TIME_ELAPSED}%60))
          TIME_ELAPSED="${min} min $sec seconds "
          echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
      - name: Generate HTML Report
        shell: bash
        run: |
          cd minikube_binaries
          export PATH=${PATH}:`go env GOPATH`/bin
          go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
          STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
          echo status: ${STAT}
          FailNum=$(echo $STAT | jq '.NumberOfFail')
          TestsNum=$(echo $STAT | jq '.NumberOfTests')
          GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
          echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
          echo ::set-env name=STAT::${STAT}
      - uses: actions/upload-artifact@v1
        with:
          name: none_ubuntu18_04
          path: minikube_binaries/report
      - name: The End Result - None on Ubuntu 18:04
        shell: bash
        run: |
          echo ${GOPOGH_RESULT}
          numFail=$(echo $STAT | jq '.NumberOfFail')
          echo "----------------${numFail} Failures----------------------------"
          echo $STAT | jq '.FailedTests' || true
          echo "-------------------------------------------------------"
          numPass=$(echo $STAT | jq '.NumberOfPass')
          echo "*** $numPass Passed ***"
          if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
  # After all integration tests finished
  # collect all the reports and upload them
  upload_all_reports:
    if: always()
    needs: [functional_test_docker_ubuntu, addons_certs_tests_docker_ubuntu, multinode_pause_tests_docker_ubuntu, preload_docker_flags_tests_docker_ubuntu, functional_baremetal_ubuntu18_04]
    runs-on: ubuntu-18.04
    steps:
      - name: download all reports
        uses: actions/download-artifact@v2-preview
      - name: upload all reports
        shell: bash {0}
        continue-on-error: true
        run: |
          mkdir -p all_reports
          ls -lah
          cp -r ./functional_test_docker_ubuntu ./all_reports/
          cp -r ./addons_certs_tests_docker_ubuntu ./all_reports/
          cp -r ./multinode_pause_tests_docker_ubuntu ./all_reports/
          cp -r ./preload_docker_flags_tests_docker_ubuntu ./all_reports/
          cp -r ./functional_baremetal_ubuntu18_04 ./all_reports/
      - uses: actions/upload-artifact@v1
        with:
          name: all_reports
          path: all_reports

View File

@ -1,15 +1,14 @@
name: CI
on: [pull_request]
on:
pull_request:
paths:
- '**.go'
env:
GOPROXY: https://proxy.golang.org
jobs:
# Runs before all other jobs
# builds the minikube binaries
build_minikube:
env:
TIME_ELAPSED: time
JOB_NAME: "Docker_Ubuntu_16_04"
GOPOGH_RESULT: ""
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
@ -19,8 +18,6 @@ jobs:
run : |
make minikube-linux-amd64
make e2e-linux-amd64
make minikube-windows-amd64.exe
make e2e-windows-amd64.exe
cp -r test/integration/testdata ./out
whoami
echo github ref $GITHUB_REF
@ -35,10 +32,6 @@ jobs:
name: minikube_binaries
path: out
lint:
env:
TIME_ELAPSED: time
JOB_NAME: "lint"
GOPOGH_RESULT: ""
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
@ -54,10 +47,6 @@ jobs:
run : make test
continue-on-error: false
unit_test:
env:
TIME_ELAPSED: time
JOB_NAME: "unit_test"
GOPOGH_RESULT: ""
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
@ -74,15 +63,15 @@ jobs:
make test
continue-on-error: false
# Run the following integration tests after the build_minikube
# They will run in parallel and use the binaries in previous step
docker_ubuntu_16_04:
# They will run in parallel and use the binaries in previous step
functional_test_docker_ubuntu:
needs: [build_minikube]
env:
TIME_ELAPSED: time
JOB_NAME: "Docker_Ubuntu_16_04"
JOB_NAME: "functional_test_docker_ubuntu"
GOPOGH_RESULT: ""
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
runs-on: ubuntu-16.04
runs-on: ubuntu-18.04
steps:
- name: Install kubectl
shell: bash
@ -112,7 +101,7 @@ jobs:
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -128,8 +117,10 @@ jobs:
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.timeout=80m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.run TestFunctional -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
@ -151,9 +142,9 @@ jobs:
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: docker_ubuntu_16_04
name: functional_test_docker_ubuntu
path: minikube_binaries/report
- name: The End Result Docker on ubuntu 16:04
- name: The End Result functional_test_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
@ -164,11 +155,11 @@ jobs:
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
docker_ubuntu_18_04:
addons_certs_tests_docker_ubuntu:
runs-on: ubuntu-18.04
env:
TIME_ELAPSED: time
JOB_NAME: "Docker_Ubuntu_18_04"
JOB_NAME: "addons_certs_tests_docker_ubuntu"
GOPOGH_RESULT: ""
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
needs: [build_minikube]
@ -201,7 +192,7 @@ jobs:
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -217,8 +208,10 @@ jobs:
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.timeout=80m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestAddons|TestCertOptions)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
@ -240,9 +233,9 @@ jobs:
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: docker_ubuntu_18_04
name: addons_certs_tests_docker_ubuntu
path: minikube_binaries/report
- name: The End Result - Docker On Ubuntu 18:04
- name: The End Result - addons_certs_tests_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
@ -253,78 +246,14 @@ jobs:
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
docker_on_windows:
needs: [build_minikube]
multinode_pause_tests_docker_ubuntu:
runs-on: ubuntu-18.04
env:
TIME_ELAPSED: time
JOB_NAME: "Docker_on_windows"
COMMIT_STATUS: ""
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- name: Docker Info
shell: bash
run: |
docker info || true
docker version || true
docker ps || true
- name: Download gopogh
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh.exe
shell: bash
- name: Download binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: run integration test
continue-on-error: true
run: |
set +euo pipefail
mkdir -p report
mkdir -p testhome
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome minikube_binaries/e2e-windows-amd64.exe -minikube-start-args=--vm-driver=docker -binary=minikube_binaries/minikube-windows-amd64.exe -test.v -test.timeout=65m 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds"
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
shell: bash
- name: Generate html report
run: |
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(${GITHUB_WORKSPACE}/gopogh.exe -in ./report/testout.json -out ./report/testout.html -name " $GITHUB_REF" -repo "${JOB_NAME} ${GITHUB_REF} ${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
shell: bash
- uses: actions/upload-artifact@v1
with:
name: docker_on_windows
path: report
- name: The End Result
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "--------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
shell: bash
none_ubuntu16_04:
needs: [build_minikube]
env:
TIME_ELAPSED: time
JOB_NAME: "None_Ubuntu_16_04"
JOB_NAME: "multinode_pause_tests_docker_ubuntu"
GOPOGH_RESULT: ""
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
runs-on: ubuntu-16.04
needs: [build_minikube]
steps:
- name: Install kubectl
shell: bash
@ -332,21 +261,29 @@ jobs:
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
# conntrack is required for kubernetes 1.18 and higher
# socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon
- name: Install tools for none
- name: Install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install conntrack
sudo apt-get -qq -y install socat
VERSION="v1.17.0"
curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz
sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin
sudo apt-get -qq -y install liblz4-tool
- name: Docker Info
shell: bash
run: |
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -362,8 +299,10 @@ jobs:
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=35m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestPause|TestMultiNode)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
@ -385,9 +324,9 @@ jobs:
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: none_ubuntu16_04
name: multinode_pause_tests_docker_ubuntu
path: minikube_binaries/report
- name: The End Result - None On Ubuntu 16:04
- name: The End Result - multinode_pause_tests_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
@ -398,11 +337,102 @@ jobs:
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
none_ubuntu18_04:
preload_docker_flags_tests_docker_ubuntu:
runs-on: ubuntu-18.04
env:
TIME_ELAPSED: time
JOB_NAME: "preload_docker_flags_tests_docker_ubuntu"
GOPOGH_RESULT: ""
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
needs: [build_minikube]
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Docker Info
shell: bash
run: |
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: true
# bash {0} to allow the test to continue to the next step, in case of failure
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestPreload|TestDockerFlags)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: preload_docker_flags_tests_docker_ubuntu
path: minikube_binaries/report
- name: The End Result - preload_docker_flags_tests_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
functional_baremetal_ubuntu18_04:
needs: [build_minikube]
env:
TIME_ELAPSED: time
JOB_NAME: "None_Ubuntu_18_04"
JOB_NAME: "functional_baremetal_ubuntu18_04"
GOPOGH_RESULT: ""
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
runs-on: ubuntu-18.04
@ -427,7 +457,7 @@ jobs:
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -444,7 +474,7 @@ jobs:
chmod a+x e2e-*
chmod a+x minikube-*
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=35m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=35m -test.v -timeout-multiplier=1.5 -test.run TestFunctional -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
@ -479,153 +509,26 @@ jobs:
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
podman_ubuntu_18_04_experimental:
needs: [build_minikube]
env:
TIME_ELAPSED: time
JOB_NAME: "Podman_Ubuntu_18_04"
GOPOGH_RESULT: ""
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
runs-on: ubuntu-18.04
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install podman
shell: bash
run: |
. /etc/os-release
sudo sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list"
wget -q https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/xUbuntu_${VERSION_ID}/Release.key -O- | sudo apt-key add -
sudo apt-key add - < Release.key || true
sudo apt-get update -qq
sudo apt-get -qq -y install podman
sudo podman version || true
sudo podman info || true
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: true
# bash {0} to allow test to continue to next step. in case of
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=podman -test.timeout=30m -test.v -timeout-multiplier=1 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: podman_ubuntu_18_04
path: minikube_binaries/report
- name: The End Result - Podman On Ubuntu 18:04
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
# After all 4 integration tests finished
# collect all the reports and upload
# After all integration tests finished
# collect all the reports and upload them
upload_all_reports:
if: always()
needs: [docker_ubuntu_16_04,docker_ubuntu_18_04,none_ubuntu16_04,none_ubuntu18_04,podman_ubuntu_18_04_experimental]
needs: [functional_test_docker_ubuntu, addons_certs_tests_docker_ubuntu, multinode_pause_tests_docker_ubuntu, preload_docker_flags_tests_docker_ubuntu, functional_baremetal_ubuntu18_04]
runs-on: ubuntu-18.04
steps:
- name: Download Results docker_ubuntu_16_04
uses: actions/download-artifact@v1
with:
name: docker_ubuntu_16_04
- name: cp docker_ubuntu_16_04 to all_report
continue-on-error: true
- name: download all reports
uses: actions/download-artifact@v2-preview
- name: upload all reports
shell: bash {0}
run: |
mkdir -p all_reports
cp -r docker_ubuntu_16_04 ./all_reports/
- name: Download Results docker_ubuntu_18_04
uses: actions/download-artifact@v1
with:
name: docker_ubuntu_18_04
- name: cp docker_ubuntu_18_04 to all_report
continue-on-error: true
shell: bash {0}
run: |
mkdir -p all_reports
cp -r docker_ubuntu_18_04 ./all_reports/
- name: download results docker_on_windows
uses: actions/download-artifact@v1
with:
name: docker_on_windows
- name: cp to all_report
shell: bash
run: |
mkdir -p all_reports
cp -r docker_on_windows ./all_reports/
- name: Download Results none_ubuntu16_04
uses: actions/download-artifact@v1
with:
name: none_ubuntu16_04
- name: cp none_ubuntu16_04 to all_report
continue-on-error: true
shell: bash {0}
run: |
mkdir -p all_reports
cp -r none_ubuntu16_04 ./all_reports/
- name: Download Results none_ubuntu18_04
uses: actions/download-artifact@v1
with:
name: none_ubuntu18_04
- name: Copy none_ubuntu18_04 to all_report
continue-on-error: true
shell: bash {0}
run: |
mkdir -p all_reports
cp -r none_ubuntu18_04 ./all_reports/
- name: Download Results podman_ubuntu_18_04
uses: actions/download-artifact@v1
with:
name: podman_ubuntu_18_04
- name: Copy podman_ubuntu_18_04 to all_report
continue-on-error: true
shell: bash {0}
run: |
mkdir -p all_reports
cp -r podman_ubuntu_18_04 ./all_reports/
ls -lah
cp -r ./functional_test_docker_ubuntu ./all_reports/
cp -r ./addons_certs_tests_docker_ubuntu ./all_reports/
cp -r ./multinode_pause_tests_docker_ubuntu ./all_reports/
cp -r ./preload_docker_flags_tests_docker_ubuntu ./all_reports/
cp -r ./functional_baremetal_ubuntu18_04 ./all_reports/
- uses: actions/upload-artifact@v1
with:
name: all_reports

View File

@ -1,5 +1,72 @@
# Release Notes
## Version 1.10.0-beta.0 - 2020-04-20
Improvements:
* faster containerd start by preloading images [#7793](https://github.com/kubernetes/minikube/pull/7793)
* Add fish completion support [#7777](https://github.com/kubernetes/minikube/pull/7777)
* Behavior change: start with no arguments uses existing cluster config [#7449](https://github.com/kubernetes/minikube/pull/7449)
* conformance: add --wait=all, reduce quirks [#7716](https://github.com/kubernetes/minikube/pull/7716)
* Upgrade minimum supported k8s version to v1.12 [#7723](https://github.com/kubernetes/minikube/pull/7723)
* Add default CNI network for running with podman [#7754](https://github.com/kubernetes/minikube/pull/7754)
* Behavior change: fallback to alternate drivers on failure [#7389](https://github.com/kubernetes/minikube/pull/7389)
* Add registry addon feature for docker on mac/windows [#7603](https://github.com/kubernetes/minikube/pull/7603)
* Check node pressure & new option "node_ready" for --wait flag [#7752](https://github.com/kubernetes/minikube/pull/7752)
* docker driver: Add Service & Tunnel features to windows [#7739](https://github.com/kubernetes/minikube/pull/7739)
* Add master node/worker node type to `minikube status` [#7586](https://github.com/kubernetes/minikube/pull/7586)
* Add new wait component apps_running [#7460](https://github.com/kubernetes/minikube/pull/7460)
* none: Add support for OpenRC init (Google CloudShell) [#7539](https://github.com/kubernetes/minikube/pull/7539)
* Upgrade falco-probe module to version 0.21.0 [#7436](https://github.com/kubernetes/minikube/pull/7436)
Bug Fixes:
* Fix multinode cluster creation for VM drivers [#7700](https://github.com/kubernetes/minikube/pull/7700)
* tunnel: Fix resolver file permissions, add DNS forwarding test [#7753](https://github.com/kubernetes/minikube/pull/7753)
* unconfine apparmor for kic [#7658](https://github.com/kubernetes/minikube/pull/7658)
* Fix `minikube delete` output nodename missing with docker/podman driver [#7553](https://github.com/kubernetes/minikube/pull/7553)
* Respect driver.FlagDefaults even if --extra-config is set [#7509](https://github.com/kubernetes/minikube/pull/7509)
* remove docker/podman overlay network for docker-runtime [#7425](https://github.com/kubernetes/minikube/pull/7425)
Huge thank you for this release towards our contributors:
- Alonyb
- Anders F Björklund
- Anshul Sirur
- Balint Pato
- Batuhan Apaydın
- Brad Walker
- Frank Schwichtenberg
- Kenta Iso
- Medya Ghazizadeh
- Michael Vorburger ⛑️
- Pablo Caderno
- Prasad Katti
- Priya Wadhwa
- Radoslaw Smigielski
- Ruben Baez
- Sharif Elgamal
- Thomas Strömberg
- Vikky Omkar
- ZouYu
- gorbondiga
- loftkun
- nestoralonso
- remraz
- sayboras
- tomocy
Thank you so much to users who helped with community triage:
- ps-feng
- Prasad Katti
And big thank you to those who participated in our docs fixit week:
- matjung
- jlaswell
- remraz
## Version 1.9.2 - 2020-04-04
Minor improvements:

View File

@ -14,8 +14,8 @@
# Bump these on release - and please check ISO_VERSION for correctness.
VERSION_MAJOR ?= 1
VERSION_MINOR ?= 9
VERSION_BUILD ?= 2
VERSION_MINOR ?= 10
VERSION_BUILD ?= 0-beta.0
RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)
VERSION ?= v$(RAW_VERSION)
@ -23,7 +23,7 @@ KUBERNETES_VERSION ?= $(shell egrep "DefaultKubernetesVersion =" pkg/minikube/co
KIC_VERSION ?= $(shell egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f2)
# Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions
ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).0
ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)
# Dashes are valid in semver, but not Linux packaging. Use ~ to delimit alpha/beta
DEB_VERSION ?= $(subst -,~,$(RAW_VERSION))
RPM_VERSION ?= $(DEB_VERSION)
@ -274,6 +274,10 @@ test: pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go ## Tr
generate-docs: out/minikube ## Automatically generate commands documentation.
out/minikube generate-docs --path ./site/content/en/docs/commands/
.PHONY: gotest
gotest: $(SOURCE_GENERATED) ## Trigger minikube test
go test -tags "$(MINIKUBE_BUILD_TAGS)" -ldflags="$(MINIKUBE_LDFLAGS)" $(MINIKUBE_TEST_FILES)
.PHONY: extract
extract: ## Compile extract tool
go run cmd/extract/extract.go
@ -393,6 +397,10 @@ reportcard: ## Run goreportcard for minikube
mdlint:
@$(MARKDOWNLINT) $(MINIKUBE_MARKDOWN_FILES)
.PHONY: verify-iso
verify-iso: # Make sure the current ISO exists in the expected bucket
gsutil stat gs://$(ISO_BUCKET)/minikube-$(ISO_VERSION).iso
out/docs/minikube.md: $(shell find "cmd") $(shell find "pkg/minikube/constants") pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go
go run -ldflags="$(MINIKUBE_LDFLAGS)" -tags gendocs hack/help_text/gen_help_text.go

View File

@ -61,8 +61,10 @@ minikube is a Kubernetes [#sig-cluster-lifecycle](https://github.com/kubernetes/
* [**#minikube on Kubernetes Slack**](https://kubernetes.slack.com) - Live chat with minikube developers!
* [minikube-users mailing list](https://groups.google.com/forum/#!forum/minikube-users)
* [minikube-dev mailing list](https://groups.google.com/forum/#!forum/minikube-dev)
* [Bi-weekly office hours, Mondays @ 11am PST](https://tinyurl.com/minikube-oh)
* [Contributing](https://minikube.sigs.k8s.io/docs/contrib/)
* [Development Roadmap](https://minikube.sigs.k8s.io/docs/contrib/roadmap/)
Join our meetings:
* [Bi-weekly office hours, Mondays @ 11am PST](https://tinyurl.com/minikube-oh)
* [Triage Party](https://minikube.sigs.k8s.io/docs/contrib/triage/)

View File

@ -28,7 +28,7 @@ import (
)
const longDescription = `
Outputs minikube shell completion for the given shell (bash or zsh)
Outputs minikube shell completion for the given shell (bash, zsh or fish)
This depends on the bash-completion binary. Example installation instructions:
OS X:
@ -37,15 +37,18 @@ const longDescription = `
$ minikube completion bash > ~/.minikube-completion # for bash users
$ minikube completion zsh > ~/.minikube-completion # for zsh users
$ source ~/.minikube-completion
$ minikube completion fish > ~/.config/fish/completions/minikube.fish # for fish users
Ubuntu:
$ apt-get install bash-completion
$ source /etc/bash-completion
$ source <(minikube completion bash) # for bash users
$ source <(minikube completion zsh) # for zsh users
$ minikube completion fish > ~/.config/fish/completions/minikube.fish # for fish users
Additionally, you may want to output the completion to a file and source in your .bashrc
Note for zsh users: [1] zsh completions are only supported in versions of zsh >= 5.2
Note for fish users: [2] please refer to this docs for more details https://fishshell.com/docs/current/#tab-completion
`
const boilerPlate = `
@ -66,24 +69,29 @@ const boilerPlate = `
var completionCmd = &cobra.Command{
Use: "completion SHELL",
Short: "Outputs minikube shell completion for the given shell (bash or zsh)",
Short: "Outputs minikube shell completion for the given shell (bash, zsh or fish)",
Long: longDescription,
Run: func(cmd *cobra.Command, args []string) {
if len(args) != 1 {
exit.UsageT("Usage: minikube completion SHELL")
}
if args[0] != "bash" && args[0] != "zsh" {
if args[0] != "bash" && args[0] != "zsh" && args[0] != "fish" {
exit.UsageT("Sorry, completion support is not yet implemented for {{.name}}", out.V{"name": args[0]})
} else if args[0] == "bash" {
err := GenerateBashCompletion(os.Stdout, cmd.Parent())
if err != nil {
exit.WithError("bash completion failed", err)
}
} else {
} else if args[0] == "zsh" {
err := GenerateZshCompletion(os.Stdout, cmd.Parent())
if err != nil {
exit.WithError("zsh completion failed", err)
}
} else {
err := GenerateFishCompletion(os.Stdout, cmd.Parent())
if err != nil {
exit.WithError("fish completion failed", err)
}
}
},
@ -279,3 +287,18 @@ __minikube_bash_source <(__minikube_convert_bash_to_zsh)
return nil
}
// GenerateBashCompletion generates the completion for the bash shell
func GenerateFishCompletion(w io.Writer, cmd *cobra.Command) error {
_, err := w.Write([]byte(boilerPlate))
if err != nil {
return err
}
err = cmd.GenFishCompletion(w, true)
if err != nil {
return errors.Wrap(err, "Error generating fish completion")
}
return nil
}

View File

@ -100,8 +100,11 @@ var addonsConfigureCmd = &cobra.Command{
acrPassword = AskForPasswordValue("-- Enter service principal password to access Azure Container Registry: ")
}
cname := ClusterFlagValue()
// Create ECR Secret
err := service.CreateSecret(
cname,
"kube-system",
"registry-creds-ecr",
map[string]string{
@ -124,6 +127,7 @@ var addonsConfigureCmd = &cobra.Command{
// Create GCR Secret
err = service.CreateSecret(
cname,
"kube-system",
"registry-creds-gcr",
map[string]string{
@ -142,6 +146,7 @@ var addonsConfigureCmd = &cobra.Command{
// Create Docker Secret
err = service.CreateSecret(
cname,
"kube-system",
"registry-creds-dpr",
map[string]string{
@ -161,6 +166,7 @@ var addonsConfigureCmd = &cobra.Command{
// Create Azure Container Registry Secret
err = service.CreateSecret(
cname,
"kube-system",
"registry-creds-acr",
map[string]string{

View File

@ -77,7 +77,7 @@ minikube addons enable {{.name}}`, out.V{"name": addonName})
namespace := "kube-system"
key := "kubernetes.io/minikube-addons-endpoint"
serviceList, err := service.GetServiceListByLabel(namespace, key, addonName)
serviceList, err := service.GetServiceListByLabel(cname, namespace, key, addonName)
if err != nil {
exit.WithCodeT(exit.Unavailable, "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}", out.V{"namespace": namespace, "labelName": key, "addonName": addonName, "error": err})
}
@ -89,7 +89,7 @@ You can add one by annotating a service with the label {{.labelName}}:{{.addonNa
svc := serviceList.Items[i].ObjectMeta.Name
var urlString []string
if urlString, err = service.WaitForService(co.API, namespace, svc, addonsURLTemplate, addonsURLMode, https, wait, interval); err != nil {
if urlString, err = service.WaitForService(co.API, co.Config.Name, namespace, svc, addonsURLTemplate, addonsURLMode, https, wait, interval); err != nil {
exit.WithCodeT(exit.Unavailable, "Wait failed: {{.error}}", out.V{"error": err})
}

View File

@ -44,6 +44,11 @@ var ProfileCmd = &cobra.Command{
}
profile := args[0]
// Check whether the profile name is container friendly
if !config.ProfileNameValid(profile) {
out.WarningT("Profile name '{{.profilename}}' is not valid", out.V{"profilename": profile})
exit.UsageT("Only alphanumeric, dots, underscores and dashes '-' are permitted. Minimum 2 characters, starting by alphanumeric.")
}
/**
we need to add code over here to check whether the profile
name is in the list of reserved keywords

View File

@ -83,8 +83,9 @@ var dashboardCmd = &cobra.Command{
ns := "kubernetes-dashboard"
svc := "kubernetes-dashboard"
out.ErrT(out.Verifying, "Verifying dashboard health ...")
checkSVC := func() error { return service.CheckService(ns, svc) }
if err = retry.Expo(checkSVC, 1*time.Second, time.Minute*5); err != nil {
checkSVC := func() error { return service.CheckService(cname, ns, svc) }
// for slow machines or parallels in CI to avoid #7503
if err = retry.Expo(checkSVC, 100*time.Microsecond, time.Minute*10); err != nil {
exit.WithCodeT(exit.Unavailable, "dashboard service is not running: {{.error}}", out.V{"error": err})
}
@ -97,7 +98,7 @@ var dashboardCmd = &cobra.Command{
out.ErrT(out.Verifying, "Verifying proxy health ...")
chkURL := func() error { return checkURL(url) }
if err = retry.Expo(chkURL, 1*time.Second, 3*time.Minute); err != nil {
if err = retry.Expo(chkURL, 100*time.Microsecond, 10*time.Minute); err != nil {
exit.WithCodeT(exit.Unavailable, "{{.url}} is not accessible: {{.error}}", out.V{"url": url, "error": err})
}

View File

@ -147,6 +147,8 @@ func runDelete(cmd *cobra.Command, args []string) {
out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": cname})
}
deletePossibleKicLeftOver(cname)
errs := DeleteProfiles([]*config.Profile{profile})
if len(errs) > 0 {
HandleDeletionErrors(errs)
@ -189,20 +191,30 @@ func DeleteProfiles(profiles []*config.Profile) []error {
return errs
}
func deleteProfileContainersAndVolumes(name string) {
func deletePossibleKicLeftOver(name string) {
delLabel := fmt.Sprintf("%s=%s", oci.ProfileLabelKey, name)
errs := oci.DeleteContainersByLabel(oci.Docker, delLabel)
if errs != nil { // it will error if there is no container to delete
glog.Infof("error deleting containers for %s (might be okay):\n%v", name, errs)
}
errs = oci.DeleteAllVolumesByLabel(oci.Docker, delLabel)
if errs != nil { // it will not error if there is nothing to delete
glog.Warningf("error deleting volumes (might be okay).\nTo see the list of volumes run: 'docker volume ls'\n:%v", errs)
}
for _, bin := range []string{oci.Docker, oci.Podman} {
cs, err := oci.ListContainersByLabel(bin, delLabel)
if err == nil && len(cs) > 0 {
for _, c := range cs {
out.T(out.DeletingHost, `Deleting container "{{.name}}" ...`, out.V{"name": name})
err := oci.DeleteContainer(bin, c)
if err != nil { // it will error if there is no container to delete
glog.Errorf("error deleting container %q. you might want to delete that manually :\n%v", name, err)
}
errs = oci.PruneAllVolumesByLabel(oci.Docker, delLabel)
if len(errs) > 0 { // it will not error if there is nothing to delete
glog.Warningf("error pruning volume (might be okay):\n%v", errs)
}
}
errs := oci.DeleteAllVolumesByLabel(bin, delLabel)
if errs != nil { // it will not error if there is nothing to delete
glog.Warningf("error deleting volumes (might be okay).\nTo see the list of volumes run: 'docker volume ls'\n:%v", errs)
}
errs = oci.PruneAllVolumesByLabel(bin, delLabel)
if len(errs) > 0 { // it will not error if there is nothing to delete
glog.Warningf("error pruning volume (might be okay):\n%v", errs)
}
}
}
@ -212,7 +224,7 @@ func deleteProfile(profile *config.Profile) error {
// if driver is oci driver, delete containers and volumes
if driver.IsKIC(profile.Config.Driver) {
out.T(out.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": profile.Name, "driver_name": profile.Config.Driver})
deleteProfileContainersAndVolumes(profile.Name)
deletePossibleKicLeftOver(profile.Name)
}
}

View File

@ -65,6 +65,14 @@ func TestDeleteProfile(t *testing.T) {
if err != nil {
t.Fatalf("tempdir: %v", err)
}
defer func() { //clean up tempdir
err := os.RemoveAll(td)
if err != nil {
t.Errorf("failed to clean up temp folder %q", td)
}
}()
err = copy.Copy("../../../pkg/minikube/config/testdata/delete-single", td)
if err != nil {
t.Fatalf("copy: %v", err)
@ -151,6 +159,13 @@ func TestDeleteAllProfiles(t *testing.T) {
if err != nil {
t.Fatalf("tempdir: %v", err)
}
defer func() { //clean up tempdir
err := os.RemoveAll(td)
if err != nil {
t.Errorf("failed to clean up temp folder %q", td)
}
}()
err = copy.Copy("../../../pkg/minikube/config/testdata/delete-all", td)
if err != nil {
t.Fatalf("copy: %v", err)

View File

@ -75,7 +75,6 @@ var nodeStartCmd = &cobra.Command{
}
func init() {
nodeStartCmd.Flags().String("name", "", "The name of the node to start")
nodeStartCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
nodeCmd.AddCommand(nodeStartCmd)
}

View File

@ -53,6 +53,5 @@ var nodeStopCmd = &cobra.Command{
}
func init() {
nodeStopCmd.Flags().String("name", "", "The name of the node to delete")
nodeCmd.AddCommand(nodeStopCmd)
}

View File

@ -33,7 +33,9 @@ import (
"github.com/spf13/cobra"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/kapi"
"k8s.io/minikube/pkg/minikube/browser"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/mustload"
@ -78,12 +80,12 @@ var serviceCmd = &cobra.Command{
cname := ClusterFlagValue()
co := mustload.Healthy(cname)
if runtime.GOOS == "darwin" && co.Config.Driver == oci.Docker {
if driver.NeedsPortForward(co.Config.Driver) {
startKicServiceTunnel(svc, cname)
return
}
urls, err := service.WaitForService(co.API, namespace, svc, serviceURLTemplate, serviceURLMode, https, wait, interval)
urls, err := service.WaitForService(co.API, co.Config.Name, namespace, svc, serviceURLTemplate, serviceURLMode, https, wait, interval)
if err != nil {
var s *service.SVCNotFoundError
if errors.As(err, &s) {
@ -112,7 +114,7 @@ func startKicServiceTunnel(svc, configName string) {
ctrlC := make(chan os.Signal, 1)
signal.Notify(ctrlC, os.Interrupt)
clientset, err := service.K8s.GetClientset(1 * time.Second)
clientset, err := kapi.Client(configName)
if err != nil {
exit.WithError("error creating clientset", err)
}
@ -137,7 +139,7 @@ func startKicServiceTunnel(svc, configName string) {
service.PrintServiceList(os.Stdout, data)
openURLs(svc, urls)
out.WarningT("Because you are using docker driver on Mac, the terminal needs to be open to run it.")
out.WarningT("Because you are using a Docker driver on {{.operating_system}}, the terminal needs to be open to run it.", out.V{"operating_system": runtime.GOOS})
<-ctrlC

View File

@ -40,7 +40,7 @@ var serviceListCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
co := mustload.Healthy(ClusterFlagValue())
serviceURLs, err := service.GetServiceURLs(co.API, serviceListNamespace, serviceURLTemplate)
serviceURLs, err := service.GetServiceURLs(co.API, co.Config.Name, serviceListNamespace, serviceURLTemplate)
if err != nil {
out.FatalT("Failed to get service URL: {{.error}}", out.V{"error": err})
out.ErrT(out.Notice, "Check that minikube is running and that you have specified the correct namespace (-n flag) if required.")

View File

@ -144,6 +144,10 @@ func runStart(cmd *cobra.Command, args []string) {
registryMirror = viper.GetStringSlice("registry_mirror")
}
if !config.ProfileNameValid(ClusterFlagValue()) {
out.WarningT("Profile name '{{.name}}' is not valid", out.V{"name": ClusterFlagValue()})
exit.UsageT("Only alphanumeric, dots, underscores and dashes '-' are permitted. Minimum 2 characters, starting by alphanumeric.")
}
existing, err := config.Load(ClusterFlagValue())
if err != nil && !config.IsNotExist(err) {
exit.WithCodeT(exit.Data, "Unable to load config: {{.error}}", out.V{"error": err})

View File

@ -255,7 +255,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
if strings.ToLower(repository) == "auto" || mirrorCountry != "" {
found, autoSelectedRepository, err := selectImageRepository(mirrorCountry, semver.MustParse(strings.TrimPrefix(k8sVersion, version.VersionPrefix)))
if err != nil {
exit.WithError("Failed to check main repository and mirrors for images for images", err)
exit.WithError("Failed to check main repository and mirrors for images", err)
}
if !found {
@ -269,7 +269,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
repository = autoSelectedRepository
}
if cmd.Flags().Changed(imageRepository) {
if cmd.Flags().Changed(imageRepository) || cmd.Flags().Changed(imageMirrorCountry) {
out.T(out.SuccessType, "Using image repository {{.name}}", out.V{"name": repository})
}

View File

@ -73,6 +73,7 @@ const (
clusterNotRunningStatusFlag = 1 << 1
k8sNotRunningStatusFlag = 1 << 2
defaultStatusFormat = `{{.Name}}
type: Control Plane
host: {{.Host}}
kubelet: {{.Kubelet}}
apiserver: {{.APIServer}}
@ -80,6 +81,7 @@ kubeconfig: {{.Kubeconfig}}
`
workerStatusFormat = `{{.Name}}
type: Worker
host: {{.Host}}
kubelet: {{.Kubelet}}
@ -102,12 +104,11 @@ var statusCmd = &cobra.Command{
cname := ClusterFlagValue()
api, cc := mustload.Partial(cname)
var st *Status
var err error
var statuses []*Status
for _, n := range cc.Nodes {
glog.Infof("checking status of %s ...", n.Name)
machineName := driver.MachineName(*cc, n)
st, err = status(api, *cc, n)
st, err := status(api, *cc, n)
glog.Infof("%s status: %+v", machineName, st)
if err != nil {
@ -116,36 +117,40 @@ var statusCmd = &cobra.Command{
if st.Host == Nonexistent {
glog.Errorf("The %q host does not exist!", machineName)
}
statuses = append(statuses, st)
}
switch strings.ToLower(output) {
case "text":
switch strings.ToLower(output) {
case "text":
for _, st := range statuses {
if err := statusText(st, os.Stdout); err != nil {
exit.WithError("status text failure", err)
}
case "json":
if err := statusJSON(st, os.Stdout); err != nil {
exit.WithError("status json failure", err)
}
default:
exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output))
}
case "json":
if err := statusJSON(statuses, os.Stdout); err != nil {
exit.WithError("status json failure", err)
}
default:
exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output))
}
// TODO: Update for multi-node
os.Exit(exitCode(st))
os.Exit(exitCode(statuses))
},
}
func exitCode(st *Status) int {
func exitCode(statuses []*Status) int {
c := 0
if st.Host != state.Running.String() {
c |= minikubeNotRunningStatusFlag
}
if (st.APIServer != state.Running.String() && st.APIServer != Irrelevant) || st.Kubelet != state.Running.String() {
c |= clusterNotRunningStatusFlag
}
if st.Kubeconfig != Configured && st.Kubeconfig != Irrelevant {
c |= k8sNotRunningStatusFlag
for _, st := range statuses {
if st.Host != state.Running.String() {
c |= minikubeNotRunningStatusFlag
}
if (st.APIServer != state.Running.String() && st.APIServer != Irrelevant) || st.Kubelet != state.Running.String() {
c |= clusterNotRunningStatusFlag
}
if st.Kubeconfig != Configured && st.Kubeconfig != Irrelevant {
c |= k8sNotRunningStatusFlag
}
}
return c
}
@ -268,8 +273,15 @@ func statusText(st *Status, w io.Writer) error {
return nil
}
func statusJSON(st *Status, w io.Writer) error {
js, err := json.Marshal(st)
func statusJSON(st []*Status, w io.Writer) error {
var js []byte
var err error
// Keep backwards compat with single node clusters to not break anyone
if len(st) == 1 {
js, err = json.Marshal(st[0])
} else {
js, err = json.Marshal(st)
}
if err != nil {
return err
}

View File

@ -35,7 +35,7 @@ func TestExitCode(t *testing.T) {
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
got := exitCode(tc.state)
got := exitCode([]*Status{tc.state})
if got != tc.want {
t.Errorf("exitcode(%+v) = %d, want: %d", tc.state, got, tc.want)
}
@ -52,17 +52,17 @@ func TestStatusText(t *testing.T) {
{
name: "ok",
state: &Status{Name: "minikube", Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: Configured},
want: "minikube\nhost: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n\n",
want: "minikube\ntype: Control Plane\nhost: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n\n",
},
{
name: "paused",
state: &Status{Name: "minikube", Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured},
want: "minikube\nhost: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n\n",
want: "minikube\ntype: Control Plane\nhost: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n\n",
},
{
name: "down",
state: &Status{Name: "minikube", Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured},
want: "minikube\nhost: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n",
want: "minikube\ntype: Control Plane\nhost: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n",
},
}
for _, tc := range tests {
@ -93,7 +93,7 @@ func TestStatusJSON(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
var b bytes.Buffer
err := statusJSON(tc.state, &b)
err := statusJSON([]*Status{tc.state}, &b)
if err != nil {
t.Errorf("json(%+v) error: %v", tc.state, err)
}

View File

@ -21,19 +21,18 @@ import (
"os"
"os/signal"
"path/filepath"
"runtime"
"strconv"
"time"
"github.com/golang/glog"
"github.com/spf13/cobra"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/kapi"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/service"
"k8s.io/minikube/pkg/minikube/tunnel"
"k8s.io/minikube/pkg/minikube/tunnel/kic"
)
@ -65,7 +64,7 @@ var tunnelCmd = &cobra.Command{
// We define the tunnel and minikube error free if the API server responds within a second.
// This also contributes to better UX, the tunnel status check can happen every second and
// doesn't hang on the API server call during startup and shutdown time or if there is a temporary error.
clientset, err := service.K8s.GetClientset(1 * time.Second)
clientset, err := kapi.Client(cname)
if err != nil {
exit.WithError("error creating clientset", err)
}
@ -78,7 +77,8 @@ var tunnelCmd = &cobra.Command{
cancel()
}()
if runtime.GOOS == "darwin" && co.Config.Driver == oci.Docker {
if driver.NeedsPortForward(co.Config.Driver) {
port, err := oci.ForwardedPort(oci.Docker, cname, 22)
if err != nil {
exit.WithError("error getting ssh port", err)

View File

@ -35,7 +35,11 @@ var rootCmd = &cobra.Command{
return validateArgs(args)
},
RunE: func(cmd *cobra.Command, args []string) error {
return perf.CompareMinikubeStart(context.Background(), os.Stdout, args)
binaries, err := retrieveBinaries(args)
if err != nil {
return err
}
return perf.CompareMinikubeStart(context.Background(), os.Stdout, binaries)
},
}
@ -46,6 +50,18 @@ func validateArgs(args []string) error {
return nil
}
func retrieveBinaries(args []string) ([]*perf.Binary, error) {
binaries := []*perf.Binary{}
for _, a := range args {
binary, err := perf.NewBinary(a)
if err != nil {
return nil, err
}
binaries = append(binaries, binary)
}
return binaries, nil
}
// Execute runs the mkcmp command
func Execute() {
if err := rootCmd.Execute(); err != nil {

0
default.profraw Normal file
View File

View File

@ -1 +1 @@
The documentation for building and hacking on the minikube ISO can be found at [/docs/contributors/minikube_iso.md](/docs/contributors/minikube_iso.md).
The documentation for building and hacking on the minikube ISO can be found [here](https://minikube.sigs.k8s.io/docs/contrib/building/iso/).

View File

@ -114,6 +114,7 @@ CONFIG_TCP_CONG_ADVANCED=y
CONFIG_TCP_MD5SIG=y
CONFIG_INET6_AH=y
CONFIG_INET6_ESP=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_NETLABEL=y
CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_ACCT=y
@ -351,6 +352,7 @@ CONFIG_NETCONSOLE=y
CONFIG_TUN=y
CONFIG_VETH=y
CONFIG_VIRTIO_NET=y
CONFIG_NET_VRF=m
CONFIG_AMD8111_ETH=m
CONFIG_PCNET32=m
CONFIG_PCMCIA_NMCLAN=m

View File

@ -109,6 +109,10 @@ if [ -n "$BOOT2DOCKER_DATA" ]; then
mkdir /var/log
mount --bind /mnt/$PARTNAME/var/log /var/log
mkdir -p /mnt/$PARTNAME/var/tmp
mkdir /var/tmp
mount --bind /mnt/$PARTNAME/var/tmp /var/tmp
mkdir -p /mnt/$PARTNAME/var/lib/kubelet
mkdir /var/lib/kubelet
mount --bind /mnt/$PARTNAME/var/lib/kubelet /var/lib/kubelet

View File

@ -29,6 +29,8 @@ endef
define PODMAN_INSTALL_TARGET_CMDS
$(INSTALL) -Dm755 $(@D)/bin/podman $(TARGET_DIR)/usr/bin/podman
$(INSTALL) -d -m 755 $(TARGET_DIR)/etc/cni/net.d/
$(INSTALL) -m 644 cni/87-podman-bridge.conflist $(TARGET_DIR)/etc/cni/net.d/87-podman-bridge.conflist
endef
$(eval $(generic-package))

2
go.mod
View File

@ -70,7 +70,7 @@ require (
github.com/samalba/dockerclient v0.0.0-20160414174713-91d7393ff859 // indirect
github.com/shirou/gopsutil v2.18.12+incompatible
github.com/spf13/cast v1.3.1 // indirect
github.com/spf13/cobra v0.0.5
github.com/spf13/cobra v1.0.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.6.1
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect

9
go.sum
View File

@ -156,6 +156,8 @@ github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfc
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
@ -706,6 +708,8 @@ github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNue
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday v1.5.3-0.20200218234912-41c5fccfd6f6 h1:tlXG832s5pa9x9Gs3Rp2rTvEqjiDEuETUOSfBEiTcns=
github.com/russross/blackfriday v1.5.3-0.20200218234912-41c5fccfd6f6/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sayboras/dockerclient v0.0.0-20191231050035-015626177a97 h1:DWY4yZN6w+FSKMeqBBXaalT8zmCn4DVwBGopShnlwFE=
@ -722,6 +726,8 @@ github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 h1:udFKJ0aHUL60LboW/A+D
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@ -750,6 +756,8 @@ github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
@ -764,6 +772,7 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.6.1 h1:VPZzIkznI1YhVMRi6vNFLHSwhnhReBfgTxIPccpfdZk=
github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY=

View File

@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash
# Copyright 2019 The Kubernetes Authors All rights reserved.
#
@ -27,15 +27,16 @@ set -ex -o pipefail
readonly PROFILE_NAME="k8sconformance"
readonly MINIKUBE=${1:-./out/minikube}
shift || true
readonly START_ARGS=$*
# Requires a fully running Kubernetes cluster.
"${MINIKUBE}" delete -p "${PROFILE_NAME}" || true
"${MINIKUBE}" start -p "${PROFILE_NAME}" $START_ARGS
"${MINIKUBE}" start -p "${PROFILE_NAME}" --wait=all
kubectl --context "${PROFILE_NAME}" get pods --all-namespaces
"${MINIKUBE}" status -p "${PROFILE_NAME}"
kubectl get pods --all-namespaces
go get -u -v github.com/heptio/sonobuoy
go get -u -v github.com/vmware-tanzu/sonobuoy
sonobuoy run --wait
outdir="$(mktemp -d)"
sonobuoy retrieve "${outdir}"
@ -47,8 +48,8 @@ mkdir ./results; tar xzf *.tar.gz -C ./results
version=$(${MINIKUBE} version | cut -d" " -f3)
mkdir minikube-${version}
cd minikube-${version}
mkdir "minikube-${version}"
cd "minikube-${version}"
cat <<EOF >PRODUCT.yaml
vendor: minikube
@ -68,4 +69,4 @@ EOF
cp ../results/plugins/e2e/results/* .
cd ..
cp -r minikube-${version} ${cwd}
cp -r "minikube-${version}" "${cwd}"

View File

@ -338,9 +338,9 @@ fi
echo ">> Installing gopogh"
if [ "$(uname)" != "Darwin" ]; then
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64 && sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64 && sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
else
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-darwin-amd64 && sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-darwin-amd64 && sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
fi
echo ">> Running gopogh"

View File

@ -27,7 +27,7 @@ set -e
OS_ARCH="linux-amd64"
VM_DRIVER="podman"
JOB_NAME="Podman_Linux"
JOB_NAME="Experimental_Podman_Linux"
mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP"

View File

@ -42,6 +42,7 @@ jobs=(
'none_Linux'
'Docker_Linux'
'Docker_macOS'
'Docker_Windows'
'Podman_Linux'
)

View File

@ -33,8 +33,13 @@ EXTRA_START_ARGS=""
EXPECTED_DEFAULT_DRIVER="hyperkit"
# restart docker on mac for a fresh test
osascript -e 'quit app "Docker"'; open -a Docker ; while [ -z "$(docker info 2> /dev/null )" ]; do printf "."; sleep 1; done; echo "" || true
# fix mac os as a service on mac os
# https://github.com/docker/for-mac/issues/882#issuecomment-506372814
osascript -e 'quit app "Docker"';
sudo /Applications/Docker.app/Contents/MacOS/Docker --quit-after-install --unattended || true
# repeating without sudo because https://github.com/docker/for-mac/issues/882#issuecomment-516946766
/Applications/Docker.app/Contents/MacOS/Docker --quit-after-install --unattended || true
osascript -e 'quit app "Docker"'; /Applications/Docker.app/Contents/MacOS/Docker --unattended &; while [ -z "$(docker info 2> /dev/null )" ]; do printf "."; sleep 1; done; echo "" || true
mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
install cron/cleanup_and_reboot_Darwin.sh $HOME/cleanup_and_reboot.sh || echo "FAILED TO INSTALL CLEANUP"

View File

@ -38,6 +38,10 @@ grep -E "^VERSION_BUILD \\?=" Makefile | grep "${VERSION_BUILD}"
# Force go packages to the Jekins home directory
export GOPATH=$HOME/go
# Verify ISO exists
echo "Verifying ISO exists ..."
make verify-iso
# Build and upload
env BUILD_IN_DOCKER=y \
make -j 16 \

View File

@ -0,0 +1,34 @@
# Copyright 2019 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
mkdir -p out
gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/minikube-windows-amd64.exe out/
gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/e2e-windows-amd64.exe out/
gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/testdata .
./out/minikube-windows-amd64.exe delete --all
out/e2e-windows-amd64.exe -minikube-start-args="--driver=docker" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=65m
$env:result=$lastexitcode
# If the last exit code was 0->success, x>0->error
If($env:result -eq 0){$env:status="success"}
Else {$env:status="failure"}
# $env:SHORT_COMMIT=$env:COMMIT.substring(0, 7)
# to be used later to implement https://github.com/kubernetes/minikube/issues/6593
$env:target_url="https://storage.googleapis.com/minikube-builds/logs/$env:MINIKUBE_LOCATION/Docker_Windows.txt"
$json = "{`"state`": `"$env:status`", `"description`": `"Jenkins`", `"target_url`": `"$env:target_url`", `"context`": `"Docker_Windows`"}"
Invoke-WebRequest -Uri "https://api.github.com/repos/kubernetes/minikube/statuses/$env:COMMIT`?access_token=$env:access_token" -Body $json -ContentType "application/json" -Method Post -usebasicparsing
Exit $env:result

View File

@ -17,9 +17,9 @@ gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/minikube-windows-am
gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/e2e-windows-amd64.exe out/
gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/testdata .
./out/minikube-windows-amd64.exe delete
./out/minikube-windows-amd64.exe delete --all
out/e2e-windows-amd64.exe -minikube-start-args="--driver=hyperv --hyperv-virtual-switch=primary-virtual-switch" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=65m
out/e2e-windows-amd64.exe -minikube-start-args="--driver=hyperv" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=65m
$env:result=$lastexitcode
# If the last exit code was 0->success, x>0->error
If($env:result -eq 0){$env:status="success"}

View File

@ -21,6 +21,7 @@ import (
"os"
"os/exec"
"path/filepath"
"time"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/drivers/kic"
@ -29,18 +30,15 @@ import (
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/sysinit"
"k8s.io/minikube/pkg/util"
"k8s.io/minikube/pkg/util/retry"
)
func generateTarball(kubernetesVersion, containerRuntime, tarballFilename string) error {
defer func() {
if err := deleteMinikube(); err != nil {
fmt.Println(err)
}
}()
driver := kic.NewDriver(kic.Config{
KubernetesVersion: kubernetesVersion,
ContainerRuntime: driver.Docker,
@ -68,44 +66,94 @@ func generateTarball(kubernetesVersion, containerRuntime, tarballFilename string
if err != nil {
return errors.Wrap(err, "kubeadm images")
}
if containerRuntime != "docker" { // kic overlay image is only needed by containerd and cri-o https://github.com/kubernetes/minikube/issues/7428
imgs = append(imgs, kic.OverlayImage)
}
runner := command.NewKICRunner(profile, driver.OCIBinary)
// will need to do this to enable the container run-time service
sv, err := util.ParseKubernetesVersion(kubernetesVersion)
if err != nil {
return errors.Wrap(err, "Failed to parse kubernetes version")
}
co := cruntime.Config{
Type: containerRuntime,
Runner: runner,
ImageRepository: "",
KubernetesVersion: sv, // this is just to satisfy cruntime and shouldnt matter what version.
}
cr, err := cruntime.New(co)
if err != nil {
return errors.Wrap(err, "failed create new runtime")
}
if err := cr.Enable(true); err != nil {
return errors.Wrap(err, "enable container runtime")
}
for _, img := range imgs {
cmd := exec.Command("docker", "exec", profile, "docker", "pull", img)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return errors.Wrapf(err, "downloading %s", img)
pull := func() error {
cmd := imagePullCommand(containerRuntime, img)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
time.Sleep(time.Second) // to avoid error: : exec: already started
return errors.Wrapf(err, "pulling image %s", img)
}
return nil
}
// retry up to 5 times if network is bad
if err = retry.Expo(pull, time.Microsecond, time.Minute, 5); err != nil {
return errors.Wrapf(err, "pull image %s", img)
}
}
// Transfer in k8s binaries
kcfg := config.KubernetesConfig{
KubernetesVersion: kubernetesVersion,
}
runner := command.NewKICRunner(profile, driver.OCIBinary)
sm := sysinit.New(runner)
if err := bsutil.TransferBinaries(kcfg, runner, sm); err != nil {
return errors.Wrap(err, "transferring k8s binaries")
}
// Create image tarball
if err := createImageTarball(tarballFilename); err != nil {
if err := createImageTarball(tarballFilename, containerRuntime); err != nil {
return errors.Wrap(err, "create tarball")
}
return copyTarballToHost(tarballFilename)
}
func createImageTarball(tarballFilename string) error {
// returns the right command to pull image for a specific runtime
func imagePullCommand(containerRuntime, img string) *exec.Cmd {
if containerRuntime == "docker" {
return exec.Command("docker", "exec", profile, "docker", "pull", img)
}
if containerRuntime == "containerd" {
return exec.Command("docker", "exec", profile, "sudo", "crictl", "pull", img)
}
return nil
}
func createImageTarball(tarballFilename, containerRuntime string) error {
// directories to save into tarball
dirs := []string{
fmt.Sprintf("./lib/docker/%s", dockerStorageDriver),
"./lib/docker/image",
"./lib/minikube/binaries",
}
if containerRuntime == "docker" {
dirs = append(dirs, fmt.Sprintf("./lib/docker/%s", dockerStorageDriver), "./lib/docker/image")
}
if containerRuntime == "containerd" {
dirs = append(dirs, fmt.Sprintf("./lib/containerd"))
}
args := []string{"exec", profile, "sudo", "tar", "-I", "lz4", "-C", "/var", "-cvf", tarballFilename}
args = append(args, dirs...)
cmd := exec.Command("docker", args...)
@ -127,7 +175,7 @@ func copyTarballToHost(tarballFilename string) error {
}
func deleteMinikube() error {
cmd := exec.Command(minikubePath, "delete", "-p", profile)
cmd := exec.Command(minikubePath, "delete", "-p", profile) // to avoid https://github.com/kubernetes/minikube/issues/7814
cmd.Stdout = os.Stdout
return cmd.Run()
}

View File

@ -20,12 +20,13 @@ import (
"bytes"
"flag"
"fmt"
"os"
"os/exec"
"runtime/debug"
"strings"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/download"
"k8s.io/minikube/pkg/minikube/exit"
)
const (
@ -35,7 +36,7 @@ const (
var (
dockerStorageDriver = "overlay2"
containerRuntimes = []string{"docker"}
containerRuntimes = []string{"docker", "containerd"}
k8sVersion string
k8sVersions []string
)
@ -50,14 +51,24 @@ func init() {
}
func main() {
defer func() {
if err := deleteMinikube(); err != nil {
fmt.Printf("error cleaning up minikube: %v \n", err)
}
}()
if err := deleteMinikube(); err != nil {
fmt.Printf("error cleaning up minikube at start up: %v \n", err)
}
if err := verifyDockerStorage(); err != nil {
exit.WithError("Docker storage type is incompatible: %v\n", err)
exit("Docker storage type is incompatible: %v \n", err)
}
if k8sVersions == nil {
var err error
k8sVersions, err = RecentK8sVersions()
if err != nil {
exit.WithError("Unable to get recent k8s versions: %v\n", err)
exit("Unable to get recent k8s versions: %v\n", err)
}
}
@ -65,16 +76,21 @@ func main() {
for _, cr := range containerRuntimes {
tf := download.TarballName(kv, cr)
if download.PreloadExists(kv, cr) {
fmt.Printf("A preloaded tarball for k8s version %s already exists, skipping generation.\n", kv)
fmt.Printf("A preloaded tarball for k8s version %s - runtime %q already exists, skipping generation.\n", kv, cr)
continue
}
fmt.Printf("A preloaded tarball for k8s version %s doesn't exist, generating now...\n", kv)
fmt.Printf("A preloaded tarball for k8s version %s - runtime %q doesn't exist, generating now...\n", kv, cr)
if err := generateTarball(kv, cr, tf); err != nil {
exit.WithError(fmt.Sprintf("generating tarball for k8s version %s with %s", kv, cr), err)
exit(fmt.Sprintf("generating tarball for k8s version %s with %s", kv, cr), err)
}
if err := uploadTarball(tf); err != nil {
exit.WithError(fmt.Sprintf("uploading tarball for k8s version %s with %s", kv, cr), err)
exit(fmt.Sprintf("uploading tarball for k8s version %s with %s", kv, cr), err)
}
if err := deleteMinikube(); err != nil {
fmt.Printf("error cleaning up minikube before finishing up: %v\n", err)
}
}
}
}
@ -93,3 +109,12 @@ func verifyDockerStorage() error {
}
return nil
}
// exit will exit and clean up minikube
func exit(msg string, err error) {
fmt.Printf("WithError(%s)=%v called from:\n%s", msg, err, debug.Stack())
if err := deleteMinikube(); err != nil {
fmt.Printf("error cleaning up minikube at start up: %v\n", err)
}
os.Exit(60)
}

View File

@ -28,9 +28,12 @@ import (
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/machine"
@ -176,6 +179,17 @@ https://github.com/kubernetes/minikube/issues/7332`, out.V{"driver_name": cc.Dri
return nil
}
if name == "registry" {
if driver.NeedsPortForward(cc.Driver) {
port, err := oci.ForwardedPort(cc.Driver, cc.Name, constants.RegistryAddonPort)
if err != nil {
return errors.Wrap(err, "registry port")
}
out.T(out.Tip, `Registry addon on with {{.driver}} uses {{.port}} please use that instead of default 5000`, out.V{"driver": cc.Driver, "port": port})
out.T(out.Documentation, `For more information see: https://minikube.sigs.k8s.io/docs/drivers/{{.driver}}`, out.V{"driver": cc.Driver})
}
}
cmd, err := machine.CommandRunner(host)
if err != nil {
return errors.Wrap(err, "command runner")
@ -244,7 +258,7 @@ func enableOrDisableAddonInternal(cc *config.ClusterConfig, addon *assets.Addon,
return err
}
return retry.Expo(apply, 1*time.Second, time.Second*30)
return retry.Expo(apply, 100*time.Microsecond, time.Minute)
}
// enableOrDisableStorageClasses enables or disables storage classes
@ -259,10 +273,6 @@ func enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val st
if name == "storage-provisioner-gluster" {
class = "glusterfile"
}
storagev1, err := storageclass.GetStoragev1()
if err != nil {
return errors.Wrapf(err, "Error getting storagev1 interface %v ", err)
}
api, err := machine.NewAPIClient()
if err != nil {
@ -279,6 +289,11 @@ func enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val st
return enableOrDisableAddon(cc, name, val)
}
storagev1, err := storageclass.GetStoragev1(cc.Name)
if err != nil {
return errors.Wrapf(err, "Error getting storagev1 interface %v ", err)
}
if enable {
// Only StorageClass for 'name' should be marked as default
err = storageclass.SetDefaultStorageClass(storagev1, class)
@ -332,7 +347,9 @@ func Start(wg *sync.WaitGroup, cc *config.ClusterConfig, toEnable map[string]boo
var awg sync.WaitGroup
out.T(out.AddonEnable, "Enabling addons: {{.addons}}", out.V{"addons": strings.Join(toEnableList, ", ")})
defer func() { // making it show after verifications( not perfect till #7613 is closed)
out.T(out.AddonEnable, "Enabled addons: {{.addons}}", out.V{"addons": strings.Join(toEnableList, ", ")})
}()
for _, a := range toEnableList {
awg.Add(1)
go func(name string) {

View File

@ -27,7 +27,12 @@ func TestExtractFile(t *testing.T) {
if nil != err {
return
}
defer os.Remove(testDir)
defer func() { //clean up tempdir
err := os.RemoveAll(testDir)
if err != nil {
t.Errorf("failed to clean up temp folder %q", testDir)
}
}()
tests := []struct {
name string

View File

@ -26,7 +26,6 @@ import (
"time"
"github.com/docker/machine/libmachine/drivers"
"github.com/docker/machine/libmachine/log"
"github.com/docker/machine/libmachine/ssh"
"github.com/docker/machine/libmachine/state"
"github.com/golang/glog"
@ -39,6 +38,7 @@ import (
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/download"
"k8s.io/minikube/pkg/minikube/sysinit"
"k8s.io/minikube/pkg/util/retry"
)
// Driver represents a kic driver https://minikube.sigs.k8s.io/docs/reference/drivers/docker
@ -93,6 +93,10 @@ func (d *Driver) Create() error {
ListenAddress: oci.DefaultBindIPV4,
ContainerPort: constants.DockerDaemonPort,
},
oci.PortMapping{
ListenAddress: oci.DefaultBindIPV4,
ContainerPort: constants.RegistryAddonPort,
},
)
exists, err := oci.ContainerExists(d.OCIBinary, params.Name)
@ -126,7 +130,7 @@ func (d *Driver) Create() error {
return
}
t := time.Now()
glog.Infof("Starting extracting preloaded images to volume")
glog.Infof("Starting extracting preloaded images to volume ...")
// Extract preloaded images to container
if err := oci.ExtractTarballToVolume(download.TarballPath(d.NodeConfig.KubernetesVersion, d.NodeConfig.ContainerRuntime), params.Name, BaseImage); err != nil {
glog.Infof("Unable to extract preloaded tarball to volume: %v", err)
@ -259,9 +263,14 @@ func (d *Driver) Kill() error {
if err := sysinit.New(d.exec).ForceStop("kubelet"); err != nil {
glog.Warningf("couldn't force stop kubelet. will continue with kill anyways: %v", err)
}
cmd := exec.Command(d.NodeConfig.OCIBinary, "kill", d.MachineName)
if err := cmd.Run(); err != nil {
return errors.Wrapf(err, "killing kic node %s", d.MachineName)
if err := oci.ShutDown(d.OCIBinary, d.MachineName); err != nil {
glog.Warningf("couldn't shutdown the container, will continue with kill anyways: %v", err)
}
cr := command.NewExecRunner() // using exec runner for interacting with dameon.
if _, err := cr.RunCmd(exec.Command(d.NodeConfig.OCIBinary, "kill", d.MachineName)); err != nil {
return errors.Wrapf(err, "killing %q", d.MachineName)
}
return nil
}
@ -269,16 +278,22 @@ func (d *Driver) Kill() error {
// Remove will delete the Kic Node Container
func (d *Driver) Remove() error {
if _, err := oci.ContainerID(d.OCIBinary, d.MachineName); err != nil {
log.Warnf("could not find the container %s to remove it.", d.MachineName)
glog.Infof("could not find the container %s to remove it. will try anyways", d.MachineName)
}
cmd := exec.Command(d.NodeConfig.OCIBinary, "rm", "-f", "-v", d.MachineName)
o, err := cmd.CombinedOutput()
out := strings.TrimSpace(string(o))
if err != nil {
if strings.Contains(out, "is already in progress") {
log.Warnf("Docker engine is stuck. please restart docker daemon on your computer.", d.MachineName)
if err := oci.DeleteContainer(d.NodeConfig.OCIBinary, d.MachineName); err != nil {
if strings.Contains(err.Error(), "is already in progress") {
return errors.Wrap(err, "stuck delete")
}
return errors.Wrapf(err, "removing container %s, output %s", d.MachineName, out)
if strings.Contains(err.Error(), "No such container:") {
return nil // nothing was found to delete.
}
}
// check there be no container left after delete
if id, err := oci.ContainerID(d.OCIBinary, d.MachineName); err == nil && id != "" {
return fmt.Errorf("expected no container ID be found for %q after delete. but got %q", d.MachineName, id)
}
return nil
}
@ -287,40 +302,43 @@ func (d *Driver) Remove() error {
func (d *Driver) Restart() error {
s, err := d.GetState()
if err != nil {
return errors.Wrap(err, "get kic state")
glog.Warningf("get state during restart: %v", err)
}
switch s {
case state.Stopped:
if s == state.Stopped { // don't stop if already stopped
return d.Start()
case state.Running, state.Error:
if err = d.Stop(); err != nil {
return fmt.Errorf("restarting a kic stop phase %v", err)
}
if err = d.Start(); err != nil {
return fmt.Errorf("restarting a kic start phase %v", err)
}
return nil
}
if err = d.Stop(); err != nil {
return fmt.Errorf("stop during restart %v", err)
}
if err = d.Start(); err != nil {
return fmt.Errorf("start during restart %v", err)
}
return nil
return fmt.Errorf("restarted not implemented for kic state %s yet", s)
}
// Start a _stopped_ kic container
// not meant to be used for Create().
// Start an already created kic container
func (d *Driver) Start() error {
s, err := d.GetState()
if err != nil {
return errors.Wrap(err, "get kic state")
cr := command.NewExecRunner() // using exec runner for interacting with docker/podman daemon
if _, err := cr.RunCmd(exec.Command(d.NodeConfig.OCIBinary, "start", d.MachineName)); err != nil {
return errors.Wrap(err, "start")
}
if s == state.Stopped {
cmd := exec.Command(d.NodeConfig.OCIBinary, "start", d.MachineName)
if err := cmd.Run(); err != nil {
return errors.Wrapf(err, "starting a stopped kic node %s", d.MachineName)
checkRunning := func() error {
s, err := oci.ContainerStatus(d.NodeConfig.OCIBinary, d.MachineName)
if err != nil {
return err
}
if s != state.Running {
return fmt.Errorf("expected container state be running but got %q", s)
}
glog.Infof("container %q state is running.", d.MachineName)
return nil
}
// TODO:medyagh maybe make it idempotent
return fmt.Errorf("cant start a not-stopped (%s) kic node", s)
if err := retry.Expo(checkRunning, 500*time.Microsecond, time.Second*30); err != nil {
return err
}
return nil
}
// Stop a host gracefully, including any containers that we are managing.

View File

@ -25,6 +25,7 @@ import (
"bufio"
"bytes"
"github.com/docker/machine/libmachine/state"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/constants"
@ -42,7 +43,7 @@ import (
func DeleteContainersByLabel(ociBin string, label string) []error {
var deleteErrs []error
cs, err := listContainersByLabel(ociBin, label)
cs, err := ListContainersByLabel(ociBin, label)
if err != nil {
return []error{fmt.Errorf("listing containers by label %q", label)}
}
@ -60,6 +61,9 @@ func DeleteContainersByLabel(ociBin string, label string) []error {
glog.Errorf("%s daemon seems to be stuck. Please try restarting your %s. :%v", ociBin, ociBin, err)
continue
}
if err := ShutDown(ociBin, c); err != nil {
glog.Infof("couldn't shut down %s (might be okay): %v ", c, err)
}
cmd := exec.Command(ociBin, "rm", "-f", "-v", c)
if out, err := cmd.CombinedOutput(); err != nil {
deleteErrs = append(deleteErrs, errors.Wrapf(err, "delete container %s: output %s", c, out))
@ -77,6 +81,9 @@ func DeleteContainer(ociBin string, name string) error {
glog.Errorf("%s daemon seems to be stuck. Please try restarting your %s. Will try to delete anyways: %v", ociBin, ociBin, err)
}
// try to delete anyways
if err := ShutDown(ociBin, name); err != nil {
glog.Infof("couldn't shut down %s (might be okay): %v ", name, err)
}
cmd := exec.Command(ociBin, "rm", "-f", "-v", name)
if out, err := cmd.CombinedOutput(); err != nil {
return errors.Wrapf(err, "delete container %s: output %s", name, out)
@ -108,7 +115,9 @@ func CreateContainerNode(p CreateParams) error {
// including some ones docker would otherwise do by default.
// for now this is what we want. in the future we may revisit this.
"--privileged",
"--security-opt", "seccomp=unconfined", // also ignore seccomp
"--security-opt", "seccomp=unconfined", // ignore seccomp
// ignore apparmore github actions docker: https://github.com/kubernetes/minikube/issues/7624
"--security-opt", "apparmor=unconfined",
"--tmpfs", "/tmp", // various things depend on working /tmp
"--tmpfs", "/run", // systemd wants a writable /run
// logs,pods be stroed on filesystem vs inside container,
@ -163,7 +172,7 @@ func CreateContainerNode(p CreateParams) error {
if err != nil {
return fmt.Errorf("temporary error checking status for %q : %v", p.Name, err)
}
if s != "running" {
if s != state.Running {
return fmt.Errorf("temporary error created container %q is not running yet", p.Name)
}
glog.Infof("the created container %q has a running status.", p.Name)
@ -313,7 +322,7 @@ func IsCreatedByMinikube(ociBinary string, nameOrID string) bool {
// ListOwnedContainers lists all the containres that kic driver created on user's machine using a label
func ListOwnedContainers(ociBinary string) ([]string, error) {
return listContainersByLabel(ociBinary, ProfileLabelKey)
return ListContainersByLabel(ociBinary, ProfileLabelKey)
}
// inspect return low-level information on containers
@ -443,8 +452,8 @@ func withPortMappings(portMappings []PortMapping) createOpt {
}
}
// listContainersByLabel returns all the container names with a specified label
func listContainersByLabel(ociBinary string, label string) ([]string, error) {
// ListContainersByLabel returns all the container names with a specified label
func ListContainersByLabel(ociBinary string, label string) ([]string, error) {
stdout, err := WarnIfSlow(ociBinary, "ps", "-a", "--filter", fmt.Sprintf("label=%s", label), "--format", "{{.Names}}")
if err != nil {
return nil, err
@ -480,7 +489,51 @@ func PointToHostDockerDaemon() error {
}
// ContainerStatus returns status of a container running,exited,...
func ContainerStatus(ociBin string, name string) (string, error) {
func ContainerStatus(ociBin string, name string) (state.State, error) {
out, err := WarnIfSlow(ociBin, "inspect", name, "--format={{.State.Status}}")
return strings.TrimSpace(string(out)), err
o := strings.TrimSpace(string(out))
switch o {
case "running":
return state.Running, nil
case "exited":
return state.Stopped, nil
case "paused":
return state.Paused, nil
case "restarting":
return state.Starting, nil
case "dead":
return state.Error, nil
default:
return state.None, errors.Wrapf(err, "unknown state %q", name)
}
}
// ShutDown runs a command inside the container to shut it down gracefully,
// so the container's processes and networking bindings are all closed before
// deletion, avoiding containers getting stuck:
// https://github.com/kubernetes/minikube/issues/7657
func ShutDown(ociBin string, name string) error {
	cmd := exec.Command(ociBin, "exec", "--privileged", "-t", name, "/bin/bash", "-c", "sudo init 0")
	if out, err := cmd.CombinedOutput(); err != nil {
		// best-effort: the container may not have bash/init, deletion still proceeds
		glog.Infof("error shutdown %s output %q : %v", name, out, err)
	}
	// helps with allowing docker realize the container is exited and report its status correctly.
	time.Sleep(time.Second * 1)
	// wait till it is stopped
	stopped := func() error {
		st, err := ContainerStatus(ociBin, name)
		if st == state.Stopped {
			glog.Infof("container %s status is %s", name, st)
			return nil
		}
		if err != nil {
			glog.Infof("temporary error verifying shutdown: %v", err)
			return errors.Wrap(err, "couldn't verify container is exited")
		}
		// BUG FIX: previously this returned errors.Wrap(err, ...) with a nil
		// err, which yields nil and made retry.Expo treat a still-running
		// container as successfully stopped. Return a non-nil error so the
		// retry loop keeps polling until the container actually exits.
		glog.Infof("temporary error: container %s status is %s but expect it to be exited", name, st)
		return fmt.Errorf("container %s status is %s but expect it to be exited", name, st)
	}
	if err := retry.Expo(stopped, time.Millisecond*500, time.Second*20); err != nil {
		return errors.Wrap(err, "verify shutdown")
	}
	glog.Infof("Successfully shutdown container %s", name)
	return nil
}

View File

@ -1,45 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ktmpl
import "text/template"
// V1Alpha1 is for Kubernetes v1.11
var V1Alpha1 = template.Must(template.New("configTmpl-v1alpha1").Funcs(template.FuncMap{
"printMapInOrder": printMapInOrder,
}).Parse(`apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
{{if .NoTaintMaster}}noTaintMaster: true{{end}}
api:
advertiseAddress: {{.AdvertiseAddress}}
bindPort: {{.APIServerPort}}
controlPlaneEndpoint: {{.ControlPlaneAddress}}
kubernetesVersion: {{.KubernetesVersion}}
certificatesDir: {{.CertDir}}
networking:
serviceSubnet: {{.ServiceCIDR}}
etcd:
dataDir: {{.EtcdDataDir}}
nodeName: "{{.NodeName}}"
apiServerCertSANs: ["127.0.0.1", "localhost", "{{.AdvertiseAddress}}"]
{{if .ImageRepository}}imageRepository: {{.ImageRepository}}
{{end}}{{if .CRISocket}}criSocket: {{.CRISocket}}
{{end}}{{range .ComponentOptions}}{{.Component}}ExtraArgs:{{range $i, $val := printMapInOrder .ExtraArgs ": " }}
{{$val}}{{end}}
{{end}}{{if .FeatureArgs}}featureGates: {{range $i, $val := .FeatureArgs}}
{{$i}}: {{$val}}{{end}}
{{end}}`))

View File

@ -110,10 +110,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana
opts.NoTaintMaster = true
b := bytes.Buffer{}
configTmpl := ktmpl.V1Alpha1
if version.GTE(semver.MustParse("1.12.0")) {
configTmpl = ktmpl.V1Alpha3
}
configTmpl := ktmpl.V1Alpha3
// v1beta1 works in v1.13, but isn't required until v1.14.
if version.GTE(semver.MustParse("1.14.0-alpha.0")) {
configTmpl = ktmpl.V1Beta1

View File

@ -70,7 +70,7 @@ func getExtraOptsPodCidr() []config.ExtraOption {
func recentReleases() ([]string, error) {
// test the 6 most recent releases
versions := []string{"v1.19", "v1.18", "v1.17", "v1.16", "v1.15", "v1.14", "v1.13", "v1.12", "v1.11"}
versions := []string{"v1.19", "v1.18", "v1.17", "v1.16", "v1.15", "v1.14", "v1.13", "v1.12"}
foundNewest := false
foundDefault := false

View File

@ -55,7 +55,7 @@ Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.11.10/kubelet --allow-privileged=true --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cadvisor-port=0 --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests
ExecStart=/var/lib/minikube/binaries/v1.12.0/kubelet --allow-privileged=true --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cadvisor-port=0 --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests
[Install]
`,
@ -200,10 +200,7 @@ ExecStart=/var/lib/minikube/binaries/v1.18.1/kubelet --authorization-mode=Webhoo
Context: 1,
})
if err != nil {
t.Fatalf("diff error: %v", err)
}
if diff != "" {
t.Errorf("unexpected diff:\n%s", diff)
t.Fatalf("diff error: %v\n%s", err, diff)
}
})
}

View File

@ -166,7 +166,15 @@ func APIServerStatus(cr command.Runner, hostname string, port int) (state.State,
rr, err = cr.RunCmd(exec.Command("sudo", "cat", path.Join("/sys/fs/cgroup/freezer", fparts[2], "freezer.state")))
if err != nil {
glog.Errorf("unable to get freezer state: %s", rr.Stderr.String())
// example error from github action:
// cat: /sys/fs/cgroup/freezer/actions_job/e62ef4349cc5a70f4b49f8a150ace391da6ad6df27073c83ecc03dbf81fde1ce/kubepods/burstable/poda1de58db0ce81d19df7999f6808def1b/5df53230fe3483fd65f341923f18a477fda92ae9cd71061168130ef164fe479c/freezer.state: No such file or directory\n"*
// TODO: #7770 investigate how to handle this error better.
if strings.Contains(rr.Stderr.String(), "freezer.state: No such file or directory\n") {
glog.Infof("unable to get freezer state (might be okay and be related to #770): %s", rr.Stderr.String())
} else {
glog.Warningf("unable to get freezer state : %s", rr.Stderr.String())
}
return apiServerHealthz(hostname, port)
}

View File

@ -18,36 +18,36 @@ limitations under the License.
package kverify
import (
"fmt"
"time"
"github.com/golang/glog"
"github.com/pkg/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/minikube/pkg/util/retry"
kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
)
// WaitForDefaultSA waits for the default service account to be created.
func WaitForDefaultSA(cs *kubernetes.Clientset, timeout time.Duration) error {
glog.Info("waiting for default service account to be created ...")
start := time.Now()
saReady := func() error {
saReady := func() (bool, error) {
// equivalent to manual check of 'kubectl --context profile get serviceaccount default'
sas, err := cs.CoreV1().ServiceAccounts("default").List(meta.ListOptions{})
if err != nil {
glog.Infof("temproary error waiting for default SA: %v", err)
return err
return false, nil
}
for _, sa := range sas.Items {
if sa.Name == "default" {
glog.Infof("found service account: %q", sa.Name)
return nil
return true, nil
}
}
return fmt.Errorf("couldn't find default service account")
return false, nil
}
if err := retry.Expo(saReady, 500*time.Millisecond, timeout); err != nil {
if err := wait.PollImmediate(kconst.APICallRetryInterval, timeout, saReady); err != nil {
return errors.Wrapf(err, "waited %s for SA", time.Since(start))
}

View File

@ -32,7 +32,9 @@ const (
// DefaultSAWaitKey is the name used in the flags for default service account
DefaultSAWaitKey = "default_sa"
// AppsRunning is the name used in the flags for waiting for k8s-apps to be running
AppsRunning = "apps_running"
AppsRunningKey = "apps_running"
// NodeReadyKey is the name used in the flags for waiting for the node status to be ready
NodeReadyKey = "node_ready"
)
// vars related to the --wait flag
@ -40,13 +42,13 @@ var (
// DefaultComponents is map of the the default components to wait for
DefaultComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true}
// NoWaitComponents is map of componets to wait for if specified 'none' or 'false'
NoComponents = map[string]bool{APIServerWaitKey: false, SystemPodsWaitKey: false, DefaultSAWaitKey: false, AppsRunning: false}
NoComponents = map[string]bool{APIServerWaitKey: false, SystemPodsWaitKey: false, DefaultSAWaitKey: false, AppsRunningKey: false, NodeReadyKey: false}
// AllComponents is map for waiting for all components.
AllComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true, DefaultSAWaitKey: true, AppsRunning: true}
AllComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true, DefaultSAWaitKey: true, AppsRunningKey: true}
// DefaultWaitList is list of all default components to wait for. only names to be used for start flags.
DefaultWaitList = []string{APIServerWaitKey, SystemPodsWaitKey}
// AllComponentsList list of all valid components keys to wait for. only names to be used used for start flags.
AllComponentsList = []string{APIServerWaitKey, SystemPodsWaitKey, DefaultSAWaitKey, AppsRunning}
AllComponentsList = []string{APIServerWaitKey, SystemPodsWaitKey, DefaultSAWaitKey, AppsRunningKey, NodeReadyKey}
// AppsRunningList running list are valid k8s-app components to wait for them to be running
AppsRunningList = []string{
"kube-dns", // coredns

View File

@ -0,0 +1,142 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package kverify verifies a running kubernetes cluster is healthy
package kverify
import (
"fmt"
"time"
"github.com/golang/glog"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// NodeCondition represents a favorable or unfavorable node condition.
// It mirrors the fields of a v1.NodeCondition that matter for health checks.
type NodeCondition struct {
	Type    v1.NodeConditionType
	Status  v1.ConditionStatus
	Reason  string
	Message string
}

// DiskPressure detects if the condition is disk pressure
func (pc *NodeCondition) DiskPressure() bool {
	return pc.Type == v1.NodeDiskPressure && pc.Status == v1.ConditionTrue
}

// MemoryPressure detects if the condition is memory pressure
func (pc *NodeCondition) MemoryPressure() bool {
	return pc.Type == v1.NodeMemoryPressure && pc.Status == v1.ConditionTrue
}

// PIDPressure detects if the condition is PID pressure
func (pc *NodeCondition) PIDPressure() bool {
	return pc.Type == v1.NodePIDPressure && pc.Status == v1.ConditionTrue
}

// NetworkUnavailable detects if the condition is network unavailable
func (pc *NodeCondition) NetworkUnavailable() bool {
	return pc.Type == v1.NodeNetworkUnavailable && pc.Status == v1.ConditionTrue
}
// errTextFormat is the shared message template for all node-condition errors.
const errTextFormat = "node has unwanted condition %q : Reason %q Message: %q"

// ErrMemoryPressure is thrown when there is node memory pressure condition
type ErrMemoryPressure struct {
	NodeCondition
}

// Error implements the error interface using the embedded condition fields.
func (e *ErrMemoryPressure) Error() string {
	return fmt.Sprintf(errTextFormat, e.Type, e.Reason, e.Message)
}

// ErrDiskPressure is thrown when there is node disk pressure condition
type ErrDiskPressure struct {
	NodeCondition
}

// Error implements the error interface using the embedded condition fields.
func (e *ErrDiskPressure) Error() string {
	return fmt.Sprintf(errTextFormat, e.Type, e.Reason, e.Message)
}

// ErrPIDPressure is thrown when there is node PID pressure condition
type ErrPIDPressure struct {
	NodeCondition
}

// Error implements the error interface using the embedded condition fields.
func (e *ErrPIDPressure) Error() string {
	return fmt.Sprintf(errTextFormat, e.Type, e.Reason, e.Message)
}

// ErrNetworkNotReady is thrown when the node condition is network not ready
type ErrNetworkNotReady struct {
	NodeCondition
}

// Error implements the error interface using the embedded condition fields.
func (e *ErrNetworkNotReady) Error() string {
	return fmt.Sprintf(errTextFormat, e.Type, e.Reason, e.Message)
}
// NodePressure verifies that node is not under disk, memory, pid or network pressure.
// It lists all nodes via the client and returns a typed error (ErrDiskPressure,
// ErrMemoryPressure, ErrPIDPressure or ErrNetworkNotReady) for the first
// unfavorable condition found, or nil when all nodes are healthy.
func NodePressure(cs *kubernetes.Clientset) error {
	glog.Info("verifying NodePressure condition ...")
	start := time.Now()
	defer func() {
		glog.Infof("duration metric: took %s to run NodePressure ...", time.Since(start))
	}()

	ns, err := cs.CoreV1().Nodes().List(meta.ListOptions{})
	if err != nil {
		return errors.Wrap(err, "list nodes")
	}

	for _, n := range ns.Items {
		// logged for diagnostics only; capacity is not checked against thresholds here
		glog.Infof("node storage ephemeral capacity is %s", n.Status.Capacity.StorageEphemeral())
		glog.Infof("node cpu capacity is %s", n.Status.Capacity.Cpu().AsDec())
		for _, c := range n.Status.Conditions {
			pc := NodeCondition{Type: c.Type, Status: c.Status, Reason: c.Reason, Message: c.Message}
			if pc.DiskPressure() {
				return &ErrDiskPressure{
					NodeCondition: pc,
				}
			}
			if pc.MemoryPressure() {
				return &ErrMemoryPressure{
					NodeCondition: pc,
				}
			}
			if pc.PIDPressure() {
				return &ErrPIDPressure{
					NodeCondition: pc,
				}
			}
			if pc.NetworkUnavailable() {
				return &ErrNetworkNotReady{
					NodeCondition: pc,
				}
			}
		}
	}
	return nil
}

View File

@ -0,0 +1,64 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package kverify verifies a running kubernetes cluster is healthy
package kverify
import (
"fmt"
"time"
"github.com/golang/glog"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
)
// WaitForNodeReady waits till kube client reports node status as "ready",
// polling until the caller-provided timeout elapses.
func WaitForNodeReady(cs *kubernetes.Clientset, timeout time.Duration) error {
	glog.Info("waiting for node status to be ready ...")
	start := time.Now()
	defer func() {
		glog.Infof("duration metric: took %s to wait for WaitForNodeReady...", time.Since(start))
	}()
	checkReady := func() (bool, error) {
		// returning a non-nil error aborts the poll loop immediately
		if time.Since(start) > timeout {
			return false, fmt.Errorf("wait for node to be ready timed out")
		}
		ns, err := cs.CoreV1().Nodes().List(meta.ListOptions{})
		if err != nil {
			glog.Infof("error listing nodes will retry: %v", err)
			return false, nil
		}
		for _, n := range ns.Items {
			for _, c := range n.Status.Conditions {
				if c.Type == v1.NodeReady && c.Status != v1.ConditionTrue {
					glog.Infof("node %q has unwanted condition %q : Reason %q Message: %q. will try. ", n.Name, c.Type, c.Reason, c.Message)
					return false, nil
				}
			}
		}
		return true, nil
	}

	// BUG FIX: this previously polled with kconst.DefaultControlPlaneTimeout,
	// silently ignoring the timeout argument; honor the caller's timeout.
	if err := wait.PollImmediate(kconst.APICallRetryInterval, timeout, checkReady); err != nil {
		return errors.Wrapf(err, "wait node ready")
	}
	return nil
}

View File

@ -101,18 +101,6 @@ func TestKubeadmImages(t *testing.T) {
"kubernetesui/dashboard:v2.0.0-rc6",
"kubernetesui/metrics-scraper:v1.0.2",
}},
{"v1.11.10", "", []string{
"k8s.gcr.io/kube-proxy-amd64:v1.11.10",
"k8s.gcr.io/kube-scheduler-amd64:v1.11.10",
"k8s.gcr.io/kube-controller-manager-amd64:v1.11.10",
"k8s.gcr.io/kube-apiserver-amd64:v1.11.10",
"k8s.gcr.io/coredns:1.1.3",
"k8s.gcr.io/etcd-amd64:3.2.18",
"k8s.gcr.io/pause:3.1",
"gcr.io/k8s-minikube/storage-provisioner:v1.8.1",
"kubernetesui/dashboard:v2.0.0-rc6",
"kubernetesui/metrics-scraper:v1.0.2",
}},
}
for _, tc := range tests {
got, err := Kubeadm(tc.mirror, tc.version)

View File

@ -21,6 +21,7 @@ import (
"context"
"os/exec"
"path"
"runtime"
"sync"
"fmt"
@ -37,9 +38,11 @@ import (
"github.com/docker/machine/libmachine/state"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/minikube/pkg/drivers/kic"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/kapi"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/bootstrapper"
@ -174,14 +177,27 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error {
"FileAvailable--etc-kubernetes-manifests-etcd.yaml",
"Port-10250", // For "none" users who already have a kubelet online
"Swap", // For "none" users who have swap configured
"SystemVerification",
}
ignore = append(ignore, bsutil.SkipAdditionalPreflights[r.Name()]...)
skipSystemVerification := false
// Allow older kubeadm versions to function with newer Docker releases.
if version.LT(semver.MustParse("1.13.0")) {
glog.Infof("ignoring SystemVerification for kubeadm because of old kubernetes version %v", version)
skipSystemVerification = true
}
if driver.BareMetal(cfg.Driver) && r.Name() == "Docker" {
if v, err := r.Version(); err == nil && strings.Contains(v, "azure") {
glog.Infof("ignoring SystemVerification for kubeadm because of unknown docker version %s", v)
skipSystemVerification = true
}
}
// For kic on linux example error: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.2.17-1rodete3-amd64"
if version.LT(semver.MustParse("1.13.0")) || driver.IsKIC(cfg.Driver) {
glog.Info("ignoring SystemVerification for kubeadm because of either driver or kubernetes version")
if driver.IsKIC(cfg.Driver) {
glog.Infof("ignoring SystemVerification for kubeadm because of %s driver", cfg.Driver)
skipSystemVerification = true
}
if skipSystemVerification {
ignore = append(ignore, "SystemVerification")
}
@ -202,9 +218,13 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error {
}
var wg sync.WaitGroup
wg.Add(4)
wg.Add(3)
go func() {
// we need to have cluster role binding before applying overlay to avoid #7428
if err := k.elevateKubeSystemPrivileges(cfg); err != nil {
glog.Errorf("unable to create cluster role binding, some addons might not work: %v", err)
}
// the overlay is required for containerd and cri-o runtime: see #7428
if driver.IsKIC(cfg.Driver) && cfg.KubernetesConfig.ContainerRuntime != "docker" {
if err := k.applyKICOverlay(cfg); err != nil {
@ -228,13 +248,6 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error {
wg.Done()
}()
go func() {
if err := k.elevateKubeSystemPrivileges(cfg); err != nil {
glog.Warningf("unable to create cluster role binding, some addons might not work: %v", err)
}
wg.Done()
}()
wg.Wait()
return nil
}
@ -316,7 +329,7 @@ func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error
endpoint := fmt.Sprintf("https://%s", net.JoinHostPort(ip, strconv.Itoa(port)))
if cc.Host != endpoint {
glog.Errorf("Overriding stale ClientConfig host %s with %s", cc.Host, endpoint)
glog.Warningf("Overriding stale ClientConfig host %s with %s", cc.Host, endpoint)
cc.Host = endpoint
}
c, err := kubernetes.NewForConfig(cc)
@ -327,16 +340,36 @@ func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error
}
// WaitForNode blocks until the node appears to be healthy
func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, timeout time.Duration) error {
func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, timeout time.Duration) (waitErr error) {
start := time.Now()
if !n.ControlPlane {
glog.Infof("%s is not a control plane, nothing to wait for", n.Name)
return nil
}
out.T(out.HealthCheck, "Verifying Kubernetes components...")
// TODO: #7706: for better performance we could use k.client inside minikube to avoid asking for external IP:PORT
hostname, _, port, err := driver.ControlPaneEndpoint(&cfg, &n, cfg.Driver)
if err != nil {
return errors.Wrap(err, "get control plane endpoint")
}
defer func() { // run pressure verification after all other checks, so there be an api server to talk to.
client, err := k.client(hostname, port)
if err != nil {
waitErr = errors.Wrap(err, "get k8s client")
}
if err := kverify.NodePressure(client); err != nil {
adviseNodePressure(err, cfg.Name, cfg.Driver)
waitErr = errors.Wrap(err, "node pressure")
}
}()
if !kverify.ShouldWait(cfg.VerifyComponents) {
glog.Infof("skip waiting for components based on config.")
return nil
return waitErr
}
cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c})
@ -344,11 +377,6 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
return errors.Wrapf(err, "create runtme-manager %s", cfg.KubernetesConfig.ContainerRuntime)
}
hostname, _, port, err := driver.ControlPaneEndpoint(&cfg, &n, cfg.Driver)
if err != nil {
return errors.Wrap(err, "get control plane endpoint")
}
if cfg.VerifyComponents[kverify.APIServerWaitKey] {
client, err := k.client(hostname, port)
if err != nil {
@ -383,7 +411,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
}
}
if cfg.VerifyComponents[kverify.AppsRunning] {
if cfg.VerifyComponents[kverify.AppsRunningKey] {
client, err := k.client(hostname, port)
if err != nil {
return errors.Wrap(err, "get k8s client")
@ -393,8 +421,18 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
}
}
if cfg.VerifyComponents[kverify.NodeReadyKey] {
client, err := k.client(hostname, port)
if err != nil {
return errors.Wrap(err, "get k8s client")
}
if err := kverify.WaitForNodeReady(client, timeout); err != nil {
return errors.Wrap(err, "waiting for node to be ready")
}
}
glog.Infof("duration metric: took %s to wait for : %+v ...", time.Since(start), cfg.VerifyComponents)
return nil
return waitErr
}
// needsReset returns whether or not the cluster needs to be reconfigured
@ -424,7 +462,8 @@ func (k *Bootstrapper) needsReset(conf string, hostname string, port int, client
glog.Infof("needs reset: %v", err)
return true
}
// to be used in the ingeration test to verify it wont reset.
glog.Infof("The running cluster does not need a reset. hostname: %s", hostname)
return false
}
@ -518,12 +557,16 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error {
return errors.Wrap(err, "system pods")
}
if err := kverify.NodePressure(client); err != nil {
adviseNodePressure(err, cfg.Name, cfg.Driver)
}
// This can fail during upgrades if the old pods have not shut down yet
addonPhase := func() error {
_, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, conf)))
return err
}
if err = retry.Expo(addonPhase, 1*time.Second, 30*time.Second); err != nil {
if err = retry.Expo(addonPhase, 100*time.Microsecond, 30*time.Second); err != nil {
glog.Warningf("addon install failed, wil retry: %v", err)
return errors.Wrap(err, "addons")
}
@ -573,14 +616,24 @@ func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) {
// DeleteCluster removes the components that were started earlier
func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error {
cr, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: k.c, Socket: k8s.CRISocket})
if err != nil {
return errors.Wrap(err, "runtime")
}
version, err := util.ParseKubernetesVersion(k8s.KubernetesVersion)
if err != nil {
return errors.Wrap(err, "parsing kubernetes version")
}
cmd := fmt.Sprintf("%s reset --force", bsutil.InvokeKubeadm(k8s.KubernetesVersion))
ka := bsutil.InvokeKubeadm(k8s.KubernetesVersion)
sp := cr.SocketPath()
if sp == "" {
sp = kconst.DefaultDockerCRISocket
}
cmd := fmt.Sprintf("%s reset --cri-socket %s --force", ka, sp)
if version.LT(semver.MustParse("1.11.0")) {
cmd = fmt.Sprintf("%s reset", bsutil.InvokeKubeadm(k8s.KubernetesVersion))
cmd = fmt.Sprintf("%s reset --cri-socket %s", ka, sp)
}
rr, derr := k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd))
@ -592,11 +645,6 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error {
glog.Warningf("stop kubelet: %v", err)
}
cr, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: k.c, Socket: k8s.CRISocket})
if err != nil {
return errors.Wrap(err, "runtime")
}
containers, err := cr.ListContainers(cruntime.ListOptions{Namespaces: []string{"kube-system"}})
if err != nil {
glog.Warningf("unable to list kube-system containers: %v", err)
@ -750,6 +798,9 @@ func startKubeletIfRequired(runner command.Runner, sm sysinit.Manager) error {
return errors.Wrap(err, "starting kubelet")
}
if err := sm.Enable("kubelet"); err != nil {
return err
}
return sm.Start("kubelet")
}
@ -820,6 +871,10 @@ func (k *Bootstrapper) applyNodeLabels(cfg config.ClusterConfig) error {
// elevateKubeSystemPrivileges gives the kube-system service account cluster admin privileges to work with RBAC.
func (k *Bootstrapper) elevateKubeSystemPrivileges(cfg config.ClusterConfig) error {
start := time.Now()
defer func() {
glog.Infof("duration metric: took %s to wait for elevateKubeSystemPrivileges.", time.Since(start))
}()
// Allow no more than 5 seconds for creating cluster role bindings
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
@ -836,6 +891,84 @@ func (k *Bootstrapper) elevateKubeSystemPrivileges(cfg config.ClusterConfig) err
return nil
}
}
glog.Infof("duration metric: took %s to wait for elevateKubeSystemPrivileges.", time.Since(start))
return err
if cfg.VerifyComponents[kverify.DefaultSAWaitKey] {
// double checking defalut sa was created.
// good for ensuring using minikube in CI is robust.
checkSA := func() (bool, error) {
cmd = exec.Command("sudo", kubectlPath(cfg),
"get", "sa", "default", fmt.Sprintf("--kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "kubeconfig")))
rr, err = k.c.RunCmd(cmd)
if err != nil {
return false, nil
}
return true, nil
}
// retry up to make sure SA is created
if err := wait.PollImmediate(kconst.APICallRetryInterval, time.Minute, checkSA); err != nil {
return errors.Wrap(err, "ensure sa was created")
}
}
return nil
}
// adviseNodePressure will advise the user what to do with different pressure
// errors based on their environment (VM driver vs Docker Desktop vs linux).
// name is the cluster profile name and drv the driver in use; err is expected
// to be one of the kverify.Err*Pressure / ErrNetworkNotReady types.
func adviseNodePressure(err error, name string, drv string) {
	if diskErr, ok := err.(*kverify.ErrDiskPressure); ok {
		out.ErrLn("")
		glog.Warning(diskErr)
		// fixed grammar: "has ran out" -> "has run out"
		out.WarningT("The node {{.name}} has run out of disk space.", out.V{"name": name})
		// generic advice for all drivers
		out.T(out.Tip, "Please free up disk or prune images.")
		if driver.IsVM(drv) {
			out.T(out.Stopped, "Please create a cluster with bigger disk size: `minikube start --disk SIZE_MB` ")
		} else if drv == oci.Docker && runtime.GOOS != "linux" {
			// fixed typo: "increse" -> "increase"
			out.T(out.Stopped, "Please increase Desktop's disk size.")
			if runtime.GOOS == "darwin" {
				out.T(out.Documentation, "Documentation: {{.url}}", out.V{"url": "https://docs.docker.com/docker-for-mac/space/"})
			}
			if runtime.GOOS == "windows" {
				out.T(out.Documentation, "Documentation: {{.url}}", out.V{"url": "https://docs.docker.com/docker-for-windows/"})
			}
		}
		out.ErrLn("")
		return
	}

	if memErr, ok := err.(*kverify.ErrMemoryPressure); ok {
		out.ErrLn("")
		glog.Warning(memErr)
		out.WarningT("The node {{.name}} has run out of memory.", out.V{"name": name})
		// fixed unbalanced quote around the kubectl command
		out.T(out.Tip, "Check if you have unnecessary pods running by running 'kubectl get po -A'")
		if driver.IsVM(drv) {
			out.T(out.Stopped, "Consider creating a cluster with larger memory size using `minikube start --memory SIZE_MB` ")
		} else if drv == oci.Docker && runtime.GOOS != "linux" {
			out.T(out.Stopped, "Consider increasing Docker Desktop's memory size.")
			if runtime.GOOS == "darwin" {
				out.T(out.Documentation, "Documentation: {{.url}}", out.V{"url": "https://docs.docker.com/docker-for-mac/space/"})
			}
			if runtime.GOOS == "windows" {
				out.T(out.Documentation, "Documentation: {{.url}}", out.V{"url": "https://docs.docker.com/docker-for-windows/"})
			}
		}
		out.ErrLn("")
		return
	}

	if pidErr, ok := err.(*kverify.ErrPIDPressure); ok {
		glog.Warning(pidErr)
		out.ErrLn("")
		out.WarningT("The node {{.name}} has run out of available PIDs.", out.V{"name": name})
		out.ErrLn("")
		return
	}

	if netErr, ok := err.(*kverify.ErrNetworkNotReady); ok {
		glog.Warning(netErr)
		out.ErrLn("")
		out.WarningT("The node {{.name}} network is not available. Please verify network settings.", out.V{"name": name})
		out.ErrLn("")
		return
	}
}

View File

@ -21,6 +21,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/golang/glog"
@ -83,6 +84,16 @@ func PrimaryControlPlane(cc *ClusterConfig) (Node, error) {
return cp, nil
}
// ProfileNameValid checks if the profile name is container name friendly.
// Container runtimes require an alphanumeric first character followed by at
// least one alphanumeric, underscore, dot or dash character, so valid names
// are at least two characters long.
func ProfileNameValid(name string) bool {
	// pattern mirrors Docker's restricted-name character set
	const pattern = `^[a-zA-Z0-9][a-zA-Z0-9_.-]+$`
	return regexp.MustCompile(pattern).MatchString(name)
}
// ProfileNameInReservedKeywords checks if the profile is an internal keywords
func ProfileNameInReservedKeywords(name string) bool {
for _, v := range keywords {

View File

@ -72,6 +72,27 @@ func TestListProfiles(t *testing.T) {
}
}
// TestProfileNameValid exercises ProfileNameValid against a table of
// container-name-friendly and -unfriendly profile names.
func TestProfileNameValid(t *testing.T) {
	cases := map[string]bool{
		"meaningful_name":  true,
		"meaningful_name@": false,
		"n_a_m_e_2":        true,
		"n":                false,
		"_name":            false,
		"N__a.M--E12567":   true,
	}
	for name, expected := range cases {
		if got := ProfileNameValid(name); got != expected {
			t.Errorf("expected ProfileNameValid(%s)=%t but got %t ", name, expected, got)
		}
	}
}
func TestProfileNameInReservedKeywords(t *testing.T) {
var testCases = []struct {
name string

View File

@ -31,7 +31,7 @@ const (
// NewestKubernetesVersion is the newest Kubernetes version to test against
NewestKubernetesVersion = "v1.18.1"
// OldestKubernetesVersion is the oldest Kubernetes version to test against
OldestKubernetesVersion = "v1.11.10"
OldestKubernetesVersion = "v1.12.0"
// DefaultClusterName is the default name for the k8s cluster
DefaultClusterName = "minikube"
// DockerDaemonPort is the port Docker daemon listening inside a minikube node (vm or container).
@ -40,6 +40,8 @@ const (
APIServerPort = 8443
// SSHPort is the SSH serviceport on the node vm and container
SSHPort = 22
// RegistryAddonPort is the default registry addon port
RegistryAddonPort = 5000
// APIServerName is the default API server name
APIServerName = "minikubeCA"

View File

@ -19,16 +19,20 @@ package cruntime
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"os/exec"
"path"
"strings"
"text/template"
"time"
"github.com/blang/semver"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/download"
"k8s.io/minikube/pkg/minikube/out"
@ -310,5 +314,118 @@ func (r *Containerd) Preload(cfg config.KubernetesConfig) error {
if !download.PreloadExists(cfg.KubernetesVersion, cfg.ContainerRuntime) {
return nil
}
return fmt.Errorf("not yet implemented for %s", r.Name())
k8sVersion := cfg.KubernetesVersion
cRuntime := cfg.ContainerRuntime
// If images already exist, return
images, err := images.Kubeadm(cfg.ImageRepository, k8sVersion)
if err != nil {
return errors.Wrap(err, "getting images")
}
if containerdImagesPreloaded(r.Runner, images) {
glog.Info("Images already preloaded, skipping extraction")
return nil
}
tarballPath := download.TarballPath(k8sVersion, cRuntime)
targetDir := "/"
targetName := "preloaded.tar.lz4"
dest := path.Join(targetDir, targetName)
c := exec.Command("which", "lz4")
if _, err := r.Runner.RunCmd(c); err != nil {
return NewErrISOFeature("lz4")
}
// Copy over tarball into host
fa, err := assets.NewFileAsset(tarballPath, targetDir, targetName, "0644")
if err != nil {
return errors.Wrap(err, "getting file asset")
}
t := time.Now()
if err := r.Runner.Copy(fa); err != nil {
return errors.Wrap(err, "copying file")
}
glog.Infof("Took %f seconds to copy over tarball", time.Since(t).Seconds())
t = time.Now()
// extract the tarball to /var in the VM
if rr, err := r.Runner.RunCmd(exec.Command("sudo", "tar", "-I", "lz4", "-C", "/var", "-xvf", dest)); err != nil {
return errors.Wrapf(err, "extracting tarball: %s", rr.Output())
}
glog.Infof("Took %f seconds t extract the tarball", time.Since(t).Seconds())
// remove the tarball in the VM
if err := r.Runner.Remove(fa); err != nil {
glog.Infof("error removing tarball: %v", err)
}
return r.Restart()
}
// Restart restarts containerd on a host via the init system.
func (r *Containerd) Restart() error {
	return r.Init.Restart("containerd")
}
// containerdImagesPreloaded returns true if every image in images is already
// present in containerd, as reported by `crictl images --output json`. Any
// failure to list or decode the images is treated as "not preloaded".
func containerdImagesPreloaded(runner command.Runner, images []string) bool {
	rr, err := runner.RunCmd(exec.Command("sudo", "crictl", "images", "--output", "json"))
	if err != nil {
		return false
	}
	// Shape of the relevant parts of crictl's JSON output.
	type containerdImages struct {
		Images []struct {
			ID          string      `json:"id"`
			RepoTags    []string    `json:"repoTags"`
			RepoDigests []string    `json:"repoDigests"`
			Size        string      `json:"size"`
			UID         interface{} `json:"uid"`
			Username    string      `json:"username"`
		} `json:"images"`
	}

	var jsonImages containerdImages
	err = json.Unmarshal(rr.Stdout.Bytes(), &jsonImages)
	if err != nil {
		glog.Errorf("failed to unmarshal images, will assume images are not preloaded")
		return false
	}

	// Index the repo tags crictl reported so each expected image is a single
	// O(1) lookup instead of a scan over every image's tags.
	available := make(map[string]bool)
	for _, ji := range jsonImages.Images {
		for _, rt := range ji.RepoTags {
			available[rt] = true
		}
	}

	for _, i := range images {
		// crictl lists images with an explicit registry prefix, so normalize
		// the expected name once before the lookup (the original recomputed
		// this inside the innermost loop).
		if !available[addRepoTagToImageName(i)] {
			glog.Infof("couldn't find preloaded image for %q. assuming images are not preloaded.", i)
			return false
		}
	}
	glog.Infof("all images are preloaded for containerd runtime.")
	return true
}
// addRepoTagToImageName makes sure the image name has a repo tag in it.
// in crictl images list have the repo tag prepended to them
// for example "kubernetesui/dashboard:v2.0.0 will show up as "docker.io/kubernetesui/dashboard:v2.0.0"
// warning this is only meant for kuberentes images where we know the GCR addreses have .io in them
// not mean to be used for public images
// addRepoTagToImageName ensures the image name carries a registry prefix.
// `crictl images` lists images with the registry prepended — for example
// "kubernetesui/dashboard:v2.0.0" shows up as
// "docker.io/kubernetesui/dashboard:v2.0.0" — so bare names get "docker.io/".
// Warning: this is only meant for Kubernetes images, where we know the GCR
// addresses have ".io" in them; it is not meant for arbitrary public images.
func addRepoTagToImageName(imgName string) string {
	if strings.Contains(imgName, ".io/") {
		// Already carries a registry; leave it untouched.
		return imgName
	}
	return "docker.io/" + imgName
}

View File

@ -0,0 +1,40 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cruntime
import (
"testing"
)
// TestAddRepoTagToImageName checks that bare image names get a docker.io
// prefix while names that already carry a registry are left alone.
func TestAddRepoTagToImageName(t *testing.T) {
	cases := []struct {
		imgName string
		want    string
	}{
		{"kubernetesui/dashboard:v2.0.0-rc6", "docker.io/kubernetesui/dashboard:v2.0.0-rc6"},
		{"kubernetesui/metrics-scraper:v1.0.2", "docker.io/kubernetesui/metrics-scraper:v1.0.2"},
		{"gcr.io/k8s-minikube/storage-provisioner:v1.8.1", "gcr.io/k8s-minikube/storage-provisioner:v1.8.1"},
	}
	for _, c := range cases {
		c := c
		t.Run(c.imgName, func(t *testing.T) {
			if got := addRepoTagToImageName(c.imgName); got != c.want {
				t.Errorf("expected image name to be: %q but got %q", c.want, got)
			}
		})
	}
}

View File

@ -220,3 +220,14 @@ func enableIPForwarding(cr CommandRunner) error {
}
return nil
}
// ImagesPreloaded returns true if all images have been preloaded into the
// given container runtime; unknown runtimes report false.
func ImagesPreloaded(containerRuntime string, runner command.Runner, images []string) bool {
	switch containerRuntime {
	case "docker":
		return dockerImagesPreloaded(runner, images)
	case "containerd":
		return containerdImagesPreloaded(runner, images)
	default:
		return false
	}
}

View File

@ -290,7 +290,7 @@ func (r *Docker) Preload(cfg config.KubernetesConfig) error {
if err != nil {
return errors.Wrap(err, "getting images")
}
if DockerImagesPreloaded(r.Runner, images) {
if dockerImagesPreloaded(r.Runner, images) {
glog.Info("Images already preloaded, skipping extraction")
return nil
}
@ -342,8 +342,8 @@ func (r *Docker) Preload(cfg config.KubernetesConfig) error {
return r.Restart()
}
// DockerImagesPreloaded returns true if all images have been preloaded
func DockerImagesPreloaded(runner command.Runner, images []string) bool {
// dockerImagesPreloaded returns true if all images have been preloaded
func dockerImagesPreloaded(runner command.Runner, images []string) bool {
rr, err := runner.RunCmd(exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}"))
if err != nil {
return false

View File

@ -31,12 +31,24 @@ func TestCacheBinary(t *testing.T) {
if err != nil {
t.Fatalf("error during creating tmp dir: %v", err)
}
defer func() { //clean up tempdir
err := os.RemoveAll(minikubeHome)
if err != nil {
t.Errorf("failed to clean up temp folder %q", minikubeHome)
}
}()
defer os.RemoveAll(minikubeHome)
noWritePermDir, err := ioutil.TempDir("/tmp", "")
if err != nil {
t.Fatalf("error during creating tmp dir: %v", err)
}
defer os.RemoveAll(noWritePermDir)
defer func() { //clean up tempdir
err := os.RemoveAll(noWritePermDir)
if err != nil {
t.Errorf("failed to clean up temp folder %q", noWritePermDir)
}
}()
err = os.Chmod(noWritePermDir, 0000)
if err != nil {
t.Fatalf("error (%v) during changing permissions of dir %v", err, noWritePermDir)

View File

@ -78,16 +78,16 @@ func remoteTarballURL(k8sVersion, containerRuntime string) string {
// PreloadExists returns true if there is a preloaded tarball that can be used
func PreloadExists(k8sVersion, containerRuntime string) bool {
// TODO: debug why this func is being called two times
glog.Infof("Checking if preload exists for k8s version %s and runtime %s", k8sVersion, containerRuntime)
if !viper.GetBool("preload") {
return false
}
// See https://github.com/kubernetes/minikube/issues/6933
// and https://github.com/kubernetes/minikube/issues/6934
// to track status of adding containerd & crio
if containerRuntime != "docker" {
glog.Info("Container runtime isn't docker, skipping preload")
// to track status of adding crio
if containerRuntime == "crio" {
glog.Info("crio is not supported yet, skipping preload")
return false
}

View File

@ -140,6 +140,16 @@ func HasResourceLimits(name string) bool {
return !(name == None || name == Podman)
}
// NeedsShutdown returns true if the driver needs a manual shutdown command
// before stopping: Hyper-V requires special care to avoid ACPI and file
// locking issues, and KIC needs shutdown to avoid the container getting
// stuck (https://github.com/kubernetes/minikube/issues/7657).
func NeedsShutdown(name string) bool {
	return name == HyperV || IsKIC(name)
}
// FlagHints are hints for what default options should be used for this driver
type FlagHints struct {
ExtraOptions []string

View File

@ -454,7 +454,7 @@ func writeStringsToFiles(e *state, output string) error {
if !strings.HasSuffix(path, ".json") {
return nil
}
fmt.Printf("Writing to %s\n", filepath.Base(path))
fmt.Printf("Writing to %s", filepath.Base(path))
currentTranslations := make(map[string]interface{})
f, err := ioutil.ReadFile(path)
if err != nil {
@ -482,6 +482,16 @@ func writeStringsToFiles(e *state, output string) error {
}
}
t := 0 // translated
u := 0 // untranslated
for k := range e.translations {
if currentTranslations[k] != "" {
t++
} else {
u++
}
}
c, err := json.MarshalIndent(currentTranslations, "", "\t")
if err != nil {
return errors.Wrap(err, "marshalling translations")
@ -490,10 +500,26 @@ func writeStringsToFiles(e *state, output string) error {
if err != nil {
return errors.Wrap(err, "writing translation file")
}
fmt.Printf(" (%d translated, %d untranslated)\n", t, u)
return nil
})
return err
if err != nil {
return err
}
c, err := json.MarshalIndent(e.translations, "", "\t")
if err != nil {
return errors.Wrap(err, "marshalling translations")
}
path := filepath.Join(output, "strings.txt")
err = lock.WriteFile(path, c, 0644)
if err != nil {
return errors.Wrap(err, "writing translation file")
}
return nil
}
// addParentFuncToList adds the current parent function to the list of functions to inspect more closely.

View File

@ -36,7 +36,12 @@ func TestExtract(t *testing.T) {
if err != nil {
t.Fatalf("Creating temp dir: %v", err)
}
defer os.RemoveAll(tempdir)
defer func() { //clean up tempdir
err := os.RemoveAll(tempdir)
if err != nil {
t.Errorf("failed to clean up temp folder %q", tempdir)
}
}()
src, err := ioutil.ReadFile("testdata/test.json")
if err != nil {

View File

@ -79,23 +79,28 @@ func DigestByGoLib(imgName string) string {
return cf.Hex
}
// WriteImageToDaemon write img to the local docker daemon
func WriteImageToDaemon(img string) error {
glog.Infof("Writing %s to local daemon", img)
// ExistsImageInDaemon if img exist in local docker daemon
func ExistsImageInDaemon(img string) bool {
// Check if image exists locally
cmd := exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}@{{.Digest}}")
if output, err := cmd.Output(); err == nil {
if strings.Contains(string(output), img) {
glog.Infof("Found %s in local docker daemon, skipping pull", img)
return nil
return true
}
}
// Else, pull it
return false
}
// WriteImageToDaemon write img to the local docker daemon
func WriteImageToDaemon(img string) error {
glog.Infof("Writing %s to local daemon", img)
ref, err := name.ParseReference(img)
if err != nil {
return errors.Wrap(err, "parsing reference")
}
glog.V(3).Infof("Getting image %v", ref)
i, err := remote.Image(ref)
if err != nil {
return errors.Wrap(err, "getting remote image")
@ -104,8 +109,26 @@ func WriteImageToDaemon(img string) error {
if err != nil {
return errors.Wrap(err, "getting tag")
}
glog.V(3).Infof("Writing image %v", tag)
_, err = daemon.Write(tag, i)
return err
if err != nil {
return errors.Wrap(err, "writing image")
}
//TODO: Make pkg/v1/daemon accept Ref too
// Only added it to pkg/v1/tarball
//
// https://github.com/google/go-containerregistry/pull/702
glog.V(3).Infof("Pulling image %v", ref)
// Pull digest
cmd := exec.Command("docker", "pull", "--quiet", img)
if _, err := cmd.Output(); err != nil {
return errors.Wrap(err, "pulling remote image")
}
return nil
}
func retrieveImage(ref name.Reference) (v1.Image, error) {

View File

@ -21,6 +21,7 @@ import (
"io/ioutil"
"net/url"
"os"
"path"
"path/filepath"
"strconv"
@ -30,6 +31,7 @@ import (
"k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/tools/clientcmd/api/latest"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/localpath"
pkgutil "k8s.io/minikube/pkg/util"
"k8s.io/minikube/pkg/util/lock"
)
@ -103,24 +105,43 @@ func Endpoint(contextName string, configPath ...string) (string, int, error) {
}
// UpdateEndpoint overwrites the IP stored in kubeconfig with the provided IP.
func UpdateEndpoint(contextName string, hostname string, port int, path string) (bool, error) {
func UpdateEndpoint(contextName string, hostname string, port int, confpath string) (bool, error) {
if hostname == "" {
return false, fmt.Errorf("empty ip")
}
err := VerifyEndpoint(contextName, hostname, port, path)
err := VerifyEndpoint(contextName, hostname, port, confpath)
if err == nil {
return false, nil
}
glog.Infof("verify returned: %v", err)
cfg, err := readOrNew(path)
cfg, err := readOrNew(confpath)
if err != nil {
return false, errors.Wrap(err, "read")
}
cfg.Clusters[contextName].Server = "https://" + hostname + ":" + strconv.Itoa(port)
err = writeToFile(cfg, path)
address := "https://" + hostname + ":" + strconv.Itoa(port)
// if the kubeconfig is missing, create a new one
if len(cfg.Clusters) == 0 {
lp := localpath.Profile(contextName)
gp := localpath.MiniPath()
kcs := &Settings{
ClusterName: contextName,
ClusterServerAddress: address,
ClientCertificate: path.Join(lp, "client.crt"),
ClientKey: path.Join(lp, "client.key"),
CertificateAuthority: path.Join(gp, "ca.crt"),
KeepContext: false,
}
err = PopulateFromSettings(kcs, cfg)
if err != nil {
return false, errors.Wrap(err, "populating kubeconfig")
}
}
cfg.Clusters[contextName].Server = address
err = writeToFile(cfg, confpath)
if err != nil {
return false, errors.Wrap(err, "write")
}

View File

@ -167,6 +167,13 @@ func TestUpdate(t *testing.T) {
if err != nil {
t.Fatalf("Error making temp directory %v", err)
}
defer func() { //clean up tempdir
err := os.RemoveAll(tmpDir)
if err != nil {
t.Errorf("failed to clean up temp folder %q", tmpDir)
}
}()
test.cfg.SetPath(filepath.Join(tmpDir, "kubeconfig"))
if len(test.existingCfg) != 0 {
if err := ioutil.WriteFile(test.cfg.filePath(), test.existingCfg, 0600); err != nil {

View File

@ -33,7 +33,12 @@ func TestReplaceWinDriveLetterToVolumeName(t *testing.T) {
if err != nil {
t.Fatalf("Error make tmp directory: %v", err)
}
defer os.RemoveAll(path)
defer func() { //clean up tempdir
err := os.RemoveAll(path)
if err != nil {
t.Errorf("failed to clean up temp folder %q", path)
}
}()
if runtime.GOOS != "windows" {
// Replace to fake func.

View File

@ -89,7 +89,13 @@ func TestCacheBinariesForBootstrapper(t *testing.T) {
if err != nil {
t.Fatalf("error during creating tmp dir: %v", err)
}
defer os.RemoveAll(minikubeHome)
defer func() { //clean up tempdir
err := os.RemoveAll(minikubeHome)
if err != nil {
t.Errorf("failed to clean up temp folder %q", minikubeHome)
}
}()
var tc = []struct {
version, clusterBootstrapper string

View File

@ -65,11 +65,10 @@ func CacheImagesForBootstrapper(imageRepository string, version string, clusterB
// LoadImages loads previously cached images into the container runtime
func LoadImages(cc *config.ClusterConfig, runner command.Runner, images []string, cacheDir string) error {
// Skip loading images if images already exist
if cruntime.DockerImagesPreloaded(runner, images) {
if cruntime.ImagesPreloaded(cc.KubernetesConfig.ContainerRuntime, runner, images) {
glog.Infof("Images are preloaded, skipping loading")
return nil
}
glog.Infof("LoadImages start: %s", images)
start := time.Now()

View File

@ -125,7 +125,12 @@ func makeTempDir() string {
func TestRunNotDriver(t *testing.T) {
tempDir := makeTempDir()
defer os.RemoveAll(tempDir)
defer func() { //clean up tempdir
err := os.RemoveAll(tempDir)
if err != nil {
t.Errorf("failed to clean up temp folder %q", tempDir)
}
}()
StartDriver()
if !localbinary.CurrentBinaryIsDockerMachine {
t.Fatal("CurrentBinaryIsDockerMachine not set. This will break driver initialization.")

View File

@ -49,6 +49,9 @@ func deleteOrphanedKIC(ociBin string, name string) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := oci.ShutDown(ociBin, name); err != nil {
glog.Infof("couldn't shut down %s (might be okay): %v ", name, err)
}
cmd := exec.CommandContext(ctx, ociBin, "rm", "-f", "-v", name)
err = cmd.Run()
if err == nil {
@ -77,8 +80,8 @@ func DeleteHost(api libmachine.API, machineName string) error {
return mcnerror.ErrHostDoesNotExist{Name: machineName}
}
// Hyper-V requires special care to avoid ACPI and file locking issues
if host.Driver.DriverName() == driver.HyperV {
// some drivers need manual shut down before delete to avoid getting stuck.
if driver.NeedsShutdown(host.Driver.DriverName()) {
if err := StopHost(api, machineName); err != nil {
glog.Warningf("stop host: %v", err)
}

View File

@ -108,6 +108,12 @@ func TestAssetsFromDir(t *testing.T) {
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
testDir, err := setupTestDir()
defer func() { //clean up tempdir
err := os.RemoveAll(testDir)
if err != nil {
t.Errorf("failed to clean up temp folder %q", testDir)
}
}()
if err != nil {
t.Errorf("got unexpected error creating test dir: %v", err)
return

View File

@ -106,8 +106,9 @@ func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node
if serr != nil || s == state.Stopped || s == state.None {
// If virtual machine does not exist due to user interrupt cancel(i.e. Ctrl + C), recreate virtual machine
me, err := machineExists(h.Driver.DriverName(), s, serr)
glog.Infof("exists: %v err=%v", me, err)
glog.Infof("%q vs %q", err, constants.ErrMachineMissing)
if err != nil {
glog.Infof("machineExists: %t. err=%v", me, err)
}
if !me || err == constants.ErrMachineMissing {
out.T(out.Shrug, `{{.driver_name}} "{{.cluster}}" {{.machine_type}} is missing, will recreate.`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType})

View File

@ -102,7 +102,7 @@ func createHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*h
glog.Infof("createHost starting for %q (driver=%q)", n.Name, cfg.Driver)
start := time.Now()
defer func() {
glog.Infof("createHost completed in %s", time.Since(start))
glog.Infof("duration metric: createHost completed in %s", time.Since(start))
}()
if cfg.Driver == driver.VMwareFusion && viper.GetBool(config.ShowDriverDeprecationNotification) {
@ -136,11 +136,11 @@ func createHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*h
cstart := time.Now()
glog.Infof("libmachine.API.Create for %q (driver=%q)", cfg.Name, cfg.Driver)
// Allow two minutes to create host before failing fast
if err := timedCreateHost(h, api, 2*time.Minute); err != nil {
if err := timedCreateHost(h, api, 4*time.Minute); err != nil {
return nil, errors.Wrap(err, "creating host")
}
glog.Infof("libmachine.API.Create for %q took %s", cfg.Name, time.Since(cstart))
glog.Infof("duration metric: libmachine.API.Create for %q took %s", cfg.Name, time.Since(cstart))
if err := postStartSetup(h, cfg); err != nil {
return h, errors.Wrap(err, "post-start")
@ -206,7 +206,7 @@ func postStartSetup(h *host.Host, mc config.ClusterConfig) error {
if driver.BareMetal(mc.Driver) {
showLocalOsRelease()
}
if driver.IsVM(mc.Driver) {
if driver.IsVM(mc.Driver) || driver.IsKIC(mc.Driver) {
logRemoteOsRelease(r)
}
return syncLocalAssets(r)

View File

@ -25,6 +25,7 @@ import (
"github.com/docker/machine/libmachine/state"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/util/retry"
@ -45,8 +46,7 @@ func StopHost(api libmachine.API, machineName string) error {
// stop forcibly stops a host without needing to load
func stop(h *host.Host) error {
start := time.Now()
if h.DriverName == driver.HyperV {
glog.Infof("As there are issues with stopping Hyper-V VMs using API, trying to shut down using SSH")
if driver.NeedsShutdown(h.DriverName) {
if err := trySSHPowerOff(h); err != nil {
return errors.Wrap(err, "ssh power off")
}
@ -61,7 +61,7 @@ func stop(h *host.Host) error {
}
return &retry.RetriableError{Err: errors.Wrap(err, "stop")}
}
glog.Infof("stop complete within %s", time.Since(start))
glog.Infof("duration metric: stop complete within %s", time.Since(start))
return nil
}
@ -78,8 +78,14 @@ func trySSHPowerOff(h *host.Host) error {
}
out.T(out.Shutdown, `Powering off "{{.profile_name}}" via SSH ...`, out.V{"profile_name": h.Name})
out, err := h.RunSSHCommand("sudo poweroff")
// poweroff always results in an error, since the host disconnects.
glog.Infof("poweroff result: out=%s, err=%v", out, err)
// different for kic because RunSSHCommand is not implemented by kic
if driver.IsKIC(h.DriverName) {
err := oci.ShutDown(h.DriverName, h.Name)
glog.Infof("shutdown container: err=%v", err)
} else {
out, err := h.RunSSHCommand("sudo poweroff")
// poweroff always results in an error, since the host disconnects.
glog.Infof("poweroff result: out=%s, err=%v", out, err)
}
return nil
}

View File

@ -43,11 +43,12 @@ const (
// BeginCacheKubernetesImages caches images required for kubernetes version in the background
func beginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVersion string, cRuntime string) {
if download.PreloadExists(k8sVersion, cRuntime) {
// TODO: remove imageRepository check once #7695 is fixed
if imageRepository == "" && download.PreloadExists(k8sVersion, cRuntime) {
glog.Info("Caching tarball of preloaded images")
err := download.Preload(k8sVersion, cRuntime)
if err == nil {
glog.Infof("Finished downloading the preloaded tar for %s on %s", k8sVersion, cRuntime)
glog.Infof("Finished verifying existence of preloaded tar for %s on %s", k8sVersion, cRuntime)
return // don't cache individual images if preload is successful.
}
glog.Warningf("Error downloading preloaded artifacts will continue without preload: %v", err)
@ -100,12 +101,14 @@ func doCacheBinaries(k8sVersion string) error {
// BeginDownloadKicArtifacts downloads the kic image + preload tarball, returns true if preload is available
func beginDownloadKicArtifacts(g *errgroup.Group) {
out.T(out.Pulling, "Pulling base image ...")
glog.Info("Beginning downloading kic artifacts")
g.Go(func() error {
glog.Infof("Downloading %s to local daemon", kic.BaseImage)
return image.WriteImageToDaemon(kic.BaseImage)
})
if !image.ExistsImageInDaemon(kic.BaseImage) {
out.T(out.Pulling, "Pulling base image ...")
g.Go(func() error {
glog.Infof("Downloading %s to local daemon", kic.BaseImage)
return image.WriteImageToDaemon(kic.BaseImage)
})
}
}
// WaitDownloadKicArtifacts blocks until the required artifacts for KIC are downloaded.

View File

@ -37,7 +37,6 @@ import (
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/addons"
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/cluster"
"k8s.io/minikube/pkg/minikube/command"
@ -145,8 +144,8 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
prepareNone()
}
// Skip pre-existing, because we already waited for health
if kverify.ShouldWait(starter.Cfg.VerifyComponents) && !starter.PreExists {
// TODO: existing cluster should wait for health #7597
if !starter.PreExists {
if err := bs.WaitForNode(*starter.Cfg, *starter.Node, viper.GetDuration(waitTimeout)); err != nil {
return nil, errors.Wrap(err, "Wait failed")
}
@ -156,9 +155,23 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
return nil, errors.Wrap(err, "Updating node")
}
cpBs, err := cluster.Bootstrapper(starter.MachineAPI, viper.GetString(cmdcfg.Bootstrapper), *starter.Cfg, starter.Runner)
// Make sure to use the command runner for the control plane to generate the join token
cp, err := config.PrimaryControlPlane(starter.Cfg)
if err != nil {
return nil, errors.Wrap(err, "Getting bootstrapper")
return nil, errors.Wrap(err, "getting primary control plane")
}
h, err := machine.LoadHost(starter.MachineAPI, driver.MachineName(*starter.Cfg, cp))
if err != nil {
return nil, errors.Wrap(err, "getting control plane host")
}
cpr, err := machine.CommandRunner(h)
if err != nil {
return nil, errors.Wrap(err, "getting control plane command runner")
}
cpBs, err := cluster.Bootstrapper(starter.MachineAPI, viper.GetString(cmdcfg.Bootstrapper), *starter.Cfg, cpr)
if err != nil {
return nil, errors.Wrap(err, "getting control plane bootstrapper")
}
joinCmd, err := cpBs.GenerateToken(*starter.Cfg)
@ -261,24 +274,16 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node,
out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value})
}
// Loads cached images, generates config files, download binaries
// update cluster and set up certs in parallel
var parallel sync.WaitGroup
parallel.Add(2)
go func() {
if err := bs.UpdateCluster(cfg); err != nil {
exit.WithError("Failed to update cluster", err)
}
parallel.Done()
}()
// update cluster and set up certs
go func() {
if err := bs.SetupCerts(cfg.KubernetesConfig, n); err != nil {
exit.WithError("Failed to setup certs", err)
}
parallel.Done()
}()
if err := bs.UpdateCluster(cfg); err != nil {
exit.WithError("Failed to update cluster", err)
}
if err := bs.SetupCerts(cfg.KubernetesConfig, n); err != nil {
exit.WithError("Failed to setup certs", err)
}
parallel.Wait()
return bs
}
@ -398,7 +403,7 @@ func validateNetwork(h *host.Host, r command.Runner, imageRepository string) (st
ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY
k = strings.ToUpper(k) // for http_proxy & https_proxy
if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce {
out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"})
out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/handbook/vpn_and_proxy/"})
warnedOnce = true
}
}

View File

@ -108,6 +108,7 @@ var styles = map[StyleEnum]style{
Enabling: {Prefix: "🔌 "},
Shutdown: {Prefix: "🛑 "},
Pulling: {Prefix: "🚜 "},
HealthCheck: {Prefix: "🔎 "},
Verifying: {Prefix: "🤔 "},
VerifyingNoLine: {Prefix: "🤔 ", OmitNewline: true},
Kubectl: {Prefix: "💗 "},

View File

@ -73,6 +73,7 @@ const (
Enabling
Shutdown
Pulling
HealthCheck
Verifying
VerifyingNoLine
Kubectl

108
pkg/minikube/perf/binary.go Normal file
View File

@ -0,0 +1,108 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package perf
import (
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/constants"
)
// Binary represents a minikube binary to benchmark: either a local file or a
// PR build downloaded from GCS.
type Binary struct {
	// path is the filesystem location of the binary.
	path string
	// pr is the PR number the binary was built from; 0 when a plain path
	// was supplied instead of a pr:// reference.
	pr int
}
const (
	// prPrefix marks a binary argument as a PR reference rather than a path.
	prPrefix = "pr://"
)
// NewBinary returns a new binary type
func NewBinary(b string) (*Binary, error) {
// If it doesn't have the prefix, assume a path
if !strings.HasPrefix(b, prPrefix) {
return &Binary{
path: b,
}, nil
}
return newBinaryFromPR(b)
}
// Name returns the name of the binary
func (b *Binary) Name() string {
if b.pr != 0 {
return fmt.Sprintf("Minikube (PR %d)", b.pr)
}
return filepath.Base(b.path)
}
// newBinaryFromPR downloads the minikube binary built for the pr by Jenkins from GCS
func newBinaryFromPR(pr string) (*Binary, error) {
pr = strings.TrimPrefix(pr, prPrefix)
// try to convert to int
i, err := strconv.Atoi(pr)
if err != nil {
return nil, errors.Wrapf(err, "converting %s to an integer", pr)
}
b := &Binary{
path: localMinikubePath(i),
pr: i,
}
if err := downloadBinary(remoteMinikubeURL(i), b.path); err != nil {
return nil, errors.Wrapf(err, "downloading minikube")
}
return b, nil
}
// remoteMinikubeURL returns the GCS URL of the linux-amd64 minikube binary
// that Jenkins built for the given PR.
func remoteMinikubeURL(pr int) string {
	return "https://storage.googleapis.com/minikube-builds/" + strconv.Itoa(pr) + "/minikube-linux-amd64"
}
// localMinikubePath returns the path under the minikube home directory where
// the binary for the given PR is cached.
func localMinikubePath(pr int) string {
	return constants.DefaultMinipath + fmt.Sprintf("/minikube-binaries/%d/minikube", pr)
}
// downloadBinary fetches url and writes the response body to path, creating
// parent directories as needed. The file is written with mode 0777 so it can
// be executed directly.
func downloadBinary(url, path string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Without this check, a GCS miss (e.g. 404) would silently save the
	// error page as the "binary".
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status code %d downloading %s", resp.StatusCode, url)
	}
	if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil {
		return err
	}
	f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0777)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, resp.Body)
	return err
}

View File

@ -35,21 +35,26 @@ const (
var (
// For testing
collectTimeMinikubeStart = timeMinikubeStart
collectTimeMinikubeStart = collectTimes
)
// CompareMinikubeStart compares the time to run `minikube start` between two minikube binaries
func CompareMinikubeStart(ctx context.Context, out io.Writer, binaries []string) error {
durations, err := collectTimes(ctx, binaries)
func CompareMinikubeStart(ctx context.Context, out io.Writer, binaries []*Binary) error {
durations, err := collectTimeMinikubeStart(ctx, binaries)
if err != nil {
return err
}
fmt.Fprintf(out, "Old binary: %v\nNew binary: %v\nAverage Old: %f\nAverage New: %f\n", durations[0], durations[1], average(durations[0]), average(durations[1]))
for i, d := range durations {
fmt.Fprintf(out, "Results for %s:\n", binaries[i].Name())
fmt.Fprintf(out, "Times: %v\n", d)
fmt.Fprintf(out, "Average Time: %f\n\n", average(d))
}
return nil
}
func collectTimes(ctx context.Context, binaries []string) ([][]float64, error) {
func collectTimes(ctx context.Context, binaries []*Binary) ([][]float64, error) {
durations := make([][]float64, len(binaries))
for i := range durations {
durations[i] = make([]float64, runs)
@ -58,9 +63,9 @@ func collectTimes(ctx context.Context, binaries []string) ([][]float64, error) {
for r := 0; r < runs; r++ {
log.Printf("Executing run %d...", r)
for index, binary := range binaries {
duration, err := collectTimeMinikubeStart(ctx, binary)
duration, err := timeMinikubeStart(ctx, binary)
if err != nil {
return nil, errors.Wrapf(err, "timing run %d with %s", r, binary)
return nil, errors.Wrapf(err, "timing run %d with %s", r, binary.Name())
}
durations[index][r] = duration
}
@ -79,12 +84,12 @@ func average(nums []float64) float64 {
// timeMinikubeStart returns the time it takes to execute `minikube start`
// It deletes the VM after `minikube start`.
func timeMinikubeStart(ctx context.Context, binary string) (float64, error) {
startCmd := exec.CommandContext(ctx, binary, "start")
func timeMinikubeStart(ctx context.Context, binary *Binary) (float64, error) {
startCmd := exec.CommandContext(ctx, binary.path, "start")
startCmd.Stdout = os.Stdout
startCmd.Stderr = os.Stderr
deleteCmd := exec.CommandContext(ctx, binary, "delete")
deleteCmd := exec.CommandContext(ctx, binary.path, "delete")
defer func() {
if err := deleteCmd.Run(); err != nil {
log.Printf("error deleting minikube: %v", err)

View File

@ -19,86 +19,64 @@ package perf
import (
"bytes"
"context"
"reflect"
"testing"
"github.com/google/go-cmp/cmp"
)
func mockCollectTimeMinikubeStart(durations []float64) func(ctx context.Context, binary string) (float64, error) {
index := 0
return func(context.Context, string) (float64, error) {
duration := durations[index]
index++
return duration, nil
func mockCollectTimes(times [][]float64) func(ctx context.Context, binaries []*Binary) ([][]float64, error) {
return func(ctx context.Context, binaries []*Binary) ([][]float64, error) {
return times, nil
}
}
func TestCompareMinikubeStartOutput(t *testing.T) {
binaries := []*Binary{
{
path: "minikube1",
}, {
path: "minikube2",
},
}
tests := []struct {
description string
durations []float64
times [][]float64
expected string
}{
{
description: "standard run",
durations: []float64{4.5, 6},
expected: "Old binary: [4.5]\nNew binary: [6]\nAverage Old: 4.500000\nAverage New: 6.000000\n",
times: [][]float64{{4.5, 6}, {1, 2}},
expected: `Results for minikube1:
Times: [4.5 6]
Average Time: 5.250000
Results for minikube2:
Times: [1 2]
Average Time: 1.500000
`,
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
originalCollectTimes := collectTimeMinikubeStart
collectTimeMinikubeStart = mockCollectTimeMinikubeStart(test.durations)
originalCollectTimes := collectTimes
collectTimeMinikubeStart = mockCollectTimes(test.times)
defer func() { collectTimeMinikubeStart = originalCollectTimes }()
buf := bytes.NewBuffer([]byte{})
err := CompareMinikubeStart(context.Background(), buf, []string{"", ""})
err := CompareMinikubeStart(context.Background(), buf, binaries)
if err != nil {
t.Fatalf("error comparing minikube start: %v", err)
}
actual := buf.String()
if test.expected != actual {
t.Fatalf("actual output does not match expected output\nActual: %v\nExpected: %v", actual, test.expected)
if diff := cmp.Diff(test.expected, actual); diff != "" {
t.Errorf("machines mismatch (-want +got):\n%s", diff)
}
})
}
}
func TestCollectTimes(t *testing.T) {
tests := []struct {
description string
durations []float64
expected [][]float64
}{
{
description: "test collect time",
durations: []float64{1, 2},
expected: [][]float64{
{1},
{2},
},
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
originalCollectTimes := collectTimeMinikubeStart
collectTimeMinikubeStart = mockCollectTimeMinikubeStart(test.durations)
defer func() { collectTimeMinikubeStart = originalCollectTimes }()
actual, err := collectTimes(context.Background(), []string{"", ""})
if err != nil {
t.Fatalf("error collecting times: %v", err)
}
if !reflect.DeepEqual(actual, test.expected) {
t.Fatalf("actual output does not match expected output\nActual: %v\nExpected: %v", actual, test.expected)
}
})
}
}
func TestAverage(t *testing.T) {
tests := []struct {
description string

View File

@ -367,8 +367,8 @@ var vmProblems = map[string]match{
}
// proxyDoc is the URL to proxy documentation
const proxyDoc = "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"
const vpnDoc = "https://minikube.sigs.k8s.io/docs/reference/networking/vpn/"
const proxyDoc = "https://minikube.sigs.k8s.io/docs/handbook/vpn_and_proxy/"
const vpnDoc = "https://minikube.sigs.k8s.io/docs/handbook/vpn_and_proxy/"
// netProblems are network related problems.
var netProblems = map[string]match{

View File

@ -40,19 +40,28 @@ func isInBlock(ip string, block string) (bool, error) {
return false, fmt.Errorf("CIDR is nil")
}
if ip == block {
return true, nil
}
i := net.ParseIP(ip)
if i == nil {
return false, fmt.Errorf("parsed IP is nil")
}
_, b, err := net.ParseCIDR(block)
if err != nil {
return false, errors.Wrapf(err, "Error Parsing block %s", b)
// check the block if it's CIDR
if strings.Contains(block, "/") {
_, b, err := net.ParseCIDR(block)
if err != nil {
return false, errors.Wrapf(err, "Error Parsing block %s", b)
}
if b.Contains(i) {
return true, nil
}
}
if b.Contains(i) {
return true, nil
}
return false, errors.Wrapf(err, "Error ip not in block")
return false, errors.New("Error ip not in block")
}
// ExcludeIP takes ip or CIDR as string and excludes it from the http(s)_proxy
@ -101,7 +110,11 @@ func checkEnv(ip string, env string) bool {
// Checks if included in IP ranges, i.e., 192.168.39.13/24
noProxyBlocks := strings.Split(v, ",")
for _, b := range noProxyBlocks {
if yes, _ := isInBlock(ip, b); yes {
yes, err := isInBlock(ip, b)
if err != nil {
glog.Warningf("fail to check proxy env: %v", err)
}
if yes {
return true
}
}

View File

@ -53,8 +53,10 @@ func TestIsInBlock(t *testing.T) {
wanntAErr bool
}{
{"", "192.168.0.1/32", false, true},
{"192.168.0.1", "", false, true},
{"192.168.0.1", "192.168.0.1", true, false},
{"192.168.0.1", "192.168.0.1/32", true, false},
{"192.168.0.2", "192.168.0.1/32", false, false},
{"192.168.0.2", "192.168.0.1/32", false, true},
{"192.168.0.1", "192.168.0.1/18", true, false},
{"abcd", "192.168.0.1/18", false, true},
{"192.168.0.1", "foo", false, true},
@ -122,6 +124,7 @@ func TestCheckEnv(t *testing.T) {
{"192.168.0.13", "NO_PROXY", false, ""},
{"192.168.0.13", "NO_PROXY", false, ","},
{"192.168.0.13", "NO_PROXY", true, "192.168.0.13"},
{"192.168.0.13", "NO_PROXY", false, "192.168.0.14"},
{"192.168.0.13", "NO_PROXY", true, ",192.168.0.13"},
{"192.168.0.13", "NO_PROXY", true, "10.10.0.13,192.168.0.13"},
{"192.168.0.13", "NO_PROXY", true, "192.168.0.13/22"},

View File

@ -97,6 +97,10 @@ func status() registry.State {
stderr := strings.TrimSpace(string(exitErr.Stderr))
newErr := fmt.Errorf(`%q %v: %s`, strings.Join(cmd.Args, " "), exitErr, stderr)
if strings.Contains(stderr, "permission denied") && runtime.GOOS == "linux" {
return registry.State{Error: newErr, Installed: true, Healthy: false, Fix: "Add your user to the 'docker' group: 'sudo usermod -aG docker $USER && newgrp docker'", Doc: "https://docs.docker.com/engine/install/linux-postinstall/"}
}
if strings.Contains(stderr, "Cannot connect") || strings.Contains(stderr, "refused") || strings.Contains(stderr, "Is the docker daemon running") {
return registry.State{Error: newErr, Installed: true, Healthy: false, Fix: "Start the Docker service", Doc: docURL}
}

View File

@ -22,6 +22,7 @@ import (
"sort"
"github.com/golang/glog"
"k8s.io/minikube/pkg/minikube/translate"
)
const (
@ -74,7 +75,8 @@ type DriverState struct {
func (d DriverState) String() string {
if d.Priority == Experimental {
return fmt.Sprintf("%s (experimental)", d.Name)
experimental := translate.T("experimental")
return fmt.Sprintf("%s (%s)", d.Name, experimental)
}
return d.Name
}

View File

@ -31,23 +31,17 @@ import (
"github.com/golang/glog"
"github.com/olekukonko/tablewriter"
"github.com/pkg/errors"
"github.com/spf13/viper"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes"
typed_core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/kapi"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/proxy"
"k8s.io/minikube/pkg/util/retry"
)
const (
defaultK8sClientTimeout = 60 * time.Second
// DefaultWait is the default wait time, in seconds
DefaultWait = 2
// DefaultInterval is the default interval, in seconds
@ -56,8 +50,7 @@ const (
// K8sClient represents a kubernetes client
type K8sClient interface {
GetCoreClient() (typed_core.CoreV1Interface, error)
GetClientset(timeout time.Duration) (*kubernetes.Clientset, error)
GetCoreClient(string) (typed_core.CoreV1Interface, error)
}
// K8sClientGetter can get a K8sClient
@ -71,39 +64,14 @@ func init() {
}
// GetCoreClient returns a core client
func (k *K8sClientGetter) GetCoreClient() (typed_core.CoreV1Interface, error) {
client, err := k.GetClientset(defaultK8sClientTimeout)
func (k *K8sClientGetter) GetCoreClient(context string) (typed_core.CoreV1Interface, error) {
client, err := kapi.Client(context)
if err != nil {
return nil, errors.Wrap(err, "getting clientset")
return nil, errors.Wrap(err, "client")
}
return client.CoreV1(), nil
}
// GetClientset returns a clientset
func (*K8sClientGetter) GetClientset(timeout time.Duration) (*kubernetes.Clientset, error) {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
profile := viper.GetString(config.ProfileName)
configOverrides := &clientcmd.ConfigOverrides{
Context: clientcmdapi.Context{
Cluster: profile,
AuthInfo: profile,
},
}
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
clientConfig, err := kubeConfig.ClientConfig()
if err != nil {
return nil, fmt.Errorf("kubeConfig: %v", err)
}
clientConfig.Timeout = timeout
clientConfig = proxy.UpdateTransport(clientConfig)
client, err := kubernetes.NewForConfig(clientConfig)
if err != nil {
return nil, errors.Wrap(err, "client from config")
}
return client, nil
}
// SvcURL represents a service URL. Each item in the URLs field combines the service URL with one of the configured
// node ports. The PortNames field contains the configured names of the ports in the URLs field (sorted correspondingly -
// first item in PortNames belongs to the first item in URLs).
@ -119,8 +87,8 @@ type URLs []SvcURL
// GetServiceURLs returns a SvcURL object for every service in a particular namespace.
// Accepts a template for formatting
func GetServiceURLs(api libmachine.API, namespace string, t *template.Template) (URLs, error) {
host, err := machine.LoadHost(api, viper.GetString(config.ProfileName))
func GetServiceURLs(api libmachine.API, cname string, namespace string, t *template.Template) (URLs, error) {
host, err := machine.LoadHost(api, cname)
if err != nil {
return nil, err
}
@ -130,7 +98,7 @@ func GetServiceURLs(api libmachine.API, namespace string, t *template.Template)
return nil, err
}
client, err := K8s.GetCoreClient()
client, err := K8s.GetCoreClient(cname)
if err != nil {
return nil, err
}
@ -155,8 +123,8 @@ func GetServiceURLs(api libmachine.API, namespace string, t *template.Template)
}
// GetServiceURLsForService returns a SvcUrl object for a service in a namespace. Supports optional formatting.
func GetServiceURLsForService(api libmachine.API, namespace, service string, t *template.Template) (SvcURL, error) {
host, err := machine.LoadHost(api, viper.GetString(config.ProfileName))
func GetServiceURLsForService(api libmachine.API, cname string, namespace, service string, t *template.Template) (SvcURL, error) {
host, err := machine.LoadHost(api, cname)
if err != nil {
return SvcURL{}, errors.Wrap(err, "Error checking if api exist and loading it")
}
@ -166,7 +134,7 @@ func GetServiceURLsForService(api libmachine.API, namespace, service string, t *
return SvcURL{}, errors.Wrap(err, "Error getting ip from host")
}
client, err := K8s.GetCoreClient()
client, err := K8s.GetCoreClient(cname)
if err != nil {
return SvcURL{}, err
}
@ -226,8 +194,8 @@ func printURLsForService(c typed_core.CoreV1Interface, ip, service, namespace st
}
// CheckService checks if a service is listening on a port.
func CheckService(namespace string, service string) error {
client, err := K8s.GetCoreClient()
func CheckService(cname string, namespace string, service string) error {
client, err := K8s.GetCoreClient(cname)
if err != nil {
return errors.Wrap(err, "Error getting kubernetes client")
}
@ -283,7 +251,7 @@ func (t SVCNotFoundError) Error() string {
}
// WaitForService waits for a service, and return the urls when available
func WaitForService(api libmachine.API, namespace string, service string, urlTemplate *template.Template, urlMode bool, https bool,
func WaitForService(api libmachine.API, cname string, namespace string, service string, urlTemplate *template.Template, urlMode bool, https bool,
wait int, interval int) ([]string, error) {
var urlList []string
// Convert "Amount of time to wait" and "interval of each check" to attempts
@ -291,18 +259,18 @@ func WaitForService(api libmachine.API, namespace string, service string, urlTem
interval = 1
}
err := CheckService(namespace, service)
err := CheckService(cname, namespace, service)
if err != nil {
return nil, &SVCNotFoundError{err}
}
chkSVC := func() error { return CheckService(namespace, service) }
chkSVC := func() error { return CheckService(cname, namespace, service) }
if err := retry.Expo(chkSVC, time.Duration(interval)*time.Second, time.Duration(wait)*time.Second); err != nil {
return nil, &SVCNotFoundError{err}
}
serviceURL, err := GetServiceURLsForService(api, namespace, service, urlTemplate)
serviceURL, err := GetServiceURLsForService(api, cname, namespace, service, urlTemplate)
if err != nil {
return urlList, errors.Wrap(err, "Check that minikube is running and that you have specified the correct namespace")
}
@ -330,8 +298,8 @@ func WaitForService(api libmachine.API, namespace string, service string, urlTem
}
// GetServiceListByLabel returns a ServiceList by label
func GetServiceListByLabel(namespace string, key string, value string) (*core.ServiceList, error) {
client, err := K8s.GetCoreClient()
func GetServiceListByLabel(cname string, namespace string, key string, value string) (*core.ServiceList, error) {
client, err := K8s.GetCoreClient(cname)
if err != nil {
return &core.ServiceList{}, &retry.RetriableError{Err: err}
}
@ -349,8 +317,8 @@ func getServiceListFromServicesByLabel(services typed_core.ServiceInterface, key
}
// CreateSecret creates or modifies secrets
func CreateSecret(namespace, name string, dataValues map[string]string, labels map[string]string) error {
client, err := K8s.GetCoreClient()
func CreateSecret(cname string, namespace, name string, dataValues map[string]string, labels map[string]string) error {
client, err := K8s.GetCoreClient(cname)
if err != nil {
return &retry.RetriableError{Err: err}
}
@ -363,7 +331,7 @@ func CreateSecret(namespace, name string, dataValues map[string]string, labels m
// Delete existing secret
if len(secret.Name) > 0 {
err = DeleteSecret(namespace, name)
err = DeleteSecret(cname, namespace, name)
if err != nil {
return &retry.RetriableError{Err: err}
}
@ -394,8 +362,8 @@ func CreateSecret(namespace, name string, dataValues map[string]string, labels m
}
// DeleteSecret deletes a secret from a namespace
func DeleteSecret(namespace, name string) error {
client, err := K8s.GetCoreClient()
func DeleteSecret(cname string, namespace, name string) error {
client, err := K8s.GetCoreClient(cname)
if err != nil {
return &retry.RetriableError{Err: err}
}

View File

@ -26,8 +26,6 @@ import (
"testing"
"text/template"
"time"
"github.com/docker/machine/libmachine"
"github.com/docker/machine/libmachine/host"
"github.com/pkg/errors"
@ -35,7 +33,6 @@ import (
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
typed_core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/kubernetes/typed/core/v1/fake"
testing_fake "k8s.io/client-go/testing"
@ -55,7 +52,7 @@ type MockClientGetter struct {
// Force GetCoreClient to fail
var getCoreClientFail bool
func (m *MockClientGetter) GetCoreClient() (typed_core.CoreV1Interface, error) {
func (m *MockClientGetter) GetCoreClient(string) (typed_core.CoreV1Interface, error) {
if getCoreClientFail {
return nil, fmt.Errorf("test Error - Mocked Get")
}
@ -65,10 +62,6 @@ func (m *MockClientGetter) GetCoreClient() (typed_core.CoreV1Interface, error) {
secretsMap: m.secretsMap}, nil
}
func (m *MockClientGetter) GetClientset(timeout time.Duration) (*kubernetes.Clientset, error) {
return nil, nil
}
func (m *MockCoreClient) Secrets(ns string) typed_core.SecretInterface {
return &fake.FakeSecrets{Fake: &fake.FakeCoreV1{Fake: &testing_fake.Fake{}}}
}
@ -476,7 +469,7 @@ func TestGetServiceURLs(t *testing.T) {
servicesMap: serviceNamespaces,
endpointsMap: endpointNamespaces,
}
urls, err := GetServiceURLs(test.api, test.namespace, defaultTemplate)
urls, err := GetServiceURLs(test.api, "minikube", test.namespace, defaultTemplate)
if err != nil && !test.err {
t.Errorf("Error GetServiceURLs %v", err)
}
@ -544,7 +537,7 @@ func TestGetServiceURLsForService(t *testing.T) {
servicesMap: serviceNamespaces,
endpointsMap: endpointNamespaces,
}
svcURL, err := GetServiceURLsForService(test.api, test.namespace, test.service, defaultTemplate)
svcURL, err := GetServiceURLsForService(test.api, "minikube", test.namespace, test.service, defaultTemplate)
if err != nil && !test.err {
t.Errorf("Error GetServiceURLsForService %v", err)
}
@ -626,7 +619,7 @@ users:
os.Setenv("KUBECONFIG", mockK8sConfigPath)
k8s := K8sClientGetter{}
_, err = k8s.GetCoreClient()
_, err = k8s.GetCoreClient("minikube")
if err != nil && !test.err {
t.Fatalf("GetCoreClient returned unexpected error: %v", err)
}
@ -691,7 +684,7 @@ func TestGetServiceListByLabel(t *testing.T) {
secretsMap: secretsNamespaces,
}
getCoreClientFail = test.failedGetClient
svcs, err := GetServiceListByLabel(test.ns, test.name, test.label)
svcs, err := GetServiceListByLabel("minikube", test.ns, test.name, test.label)
if err != nil && !test.err {
t.Fatalf("Test %v got unexpected error: %v", test.description, err)
}
@ -741,7 +734,7 @@ func TestCheckService(t *testing.T) {
secretsMap: secretsNamespaces,
}
getCoreClientFail = test.failedGetClient
err := CheckService(test.ns, test.name)
err := CheckService("minikube", test.ns, test.name)
if err == nil && test.err {
t.Fatalf("Test %v expected error but got nil", test.description)
}
@ -780,7 +773,7 @@ func TestDeleteSecret(t *testing.T) {
secretsMap: secretsNamespaces,
}
getCoreClientFail = test.failedGetClient
err := DeleteSecret(test.ns, test.name)
err := DeleteSecret("minikube", test.ns, test.name)
if err == nil && test.err {
t.Fatalf("Test %v expected error but got nil", test.description)
}
@ -819,7 +812,7 @@ func TestCreateSecret(t *testing.T) {
secretsMap: secretsNamespaces,
}
getCoreClientFail = test.failedGetClient
err := CreateSecret(test.ns, test.name, map[string]string{"ns": "secret"}, map[string]string{"ns": "baz"})
err := CreateSecret("minikube", test.ns, test.name, map[string]string{"ns": "secret"}, map[string]string{"ns": "baz"})
if err == nil && test.err {
t.Fatalf("Test %v expected error but got nil", test.description)
}
@ -921,7 +914,7 @@ func TestWaitAndMaybeOpenService(t *testing.T) {
}
var urlList []string
urlList, err := WaitForService(test.api, test.namespace, test.service, defaultTemplate, test.urlMode, test.https, 1, 0)
urlList, err := WaitForService(test.api, "minikube", test.namespace, test.service, defaultTemplate, test.urlMode, test.https, 1, 0)
if test.err && err == nil {
t.Fatalf("WaitForService expected to fail for test: %v", test)
}
@ -986,7 +979,7 @@ func TestWaitAndMaybeOpenServiceForNotDefaultNamspace(t *testing.T) {
servicesMap: serviceNamespaceOther,
endpointsMap: endpointNamespaces,
}
_, err := WaitForService(test.api, test.namespace, test.service, defaultTemplate, test.urlMode, test.https, 1, 0)
_, err := WaitForService(test.api, "minikube", test.namespace, test.service, defaultTemplate, test.urlMode, test.https, 1, 0)
if test.err && err == nil {
t.Fatalf("WaitForService expected to fail for test: %v", test)
}

View File

@ -22,9 +22,8 @@ import (
"github.com/pkg/errors"
v1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/minikube/pkg/kapi"
)
func annotateDefaultStorageClass(storage storagev1.StorageV1Interface, class *v1.StorageClass, enable bool) error {
@ -71,25 +70,11 @@ func SetDefaultStorageClass(storage storagev1.StorageV1Interface, name string) e
}
// GetStoragev1 return storage v1 interface for client
func GetStoragev1() (storagev1.StorageV1Interface, error) {
client, err := getClient()
func GetStoragev1(context string) (storagev1.StorageV1Interface, error) {
client, err := kapi.Client(context)
if err != nil {
return nil, err
}
sv1 := client.StorageV1()
return sv1, nil
}
func getClient() (*kubernetes.Clientset, error) {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{})
config, err := kubeConfig.ClientConfig()
if err != nil {
return nil, errors.Wrap(err, "Error creating kubeConfig")
}
client, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, errors.Wrap(err, "Error creating new client from kubeConfig.ClientConfig()")
}
return client, nil
}

View File

@ -212,45 +212,6 @@ users:
- name: minikube
`
func TestGetClient(t *testing.T) {
var tests = []struct {
description string
config string
err bool
}{
{
description: "ok",
config: mockK8sConfig,
},
{
description: "no valid config",
config: "this is not valid config",
err: true,
},
}
configFile, err := ioutil.TempFile("/tmp", "")
if err != nil {
t.Fatalf(err.Error())
}
defer os.Remove(configFile.Name())
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
if err := setK8SConfig(test.config, configFile.Name()); err != nil {
t.Fatalf(err.Error())
}
_, err = getClient()
if err != nil && !test.err {
t.Fatalf("Unexpected err: %v for test: %v", err, test.description)
}
if err == nil && test.err {
t.Fatalf("Expected err for test: %v", test.description)
}
})
}
}
func TestGetStoragev1(t *testing.T) {
var tests = []struct {
description string
@ -278,7 +239,8 @@ func TestGetStoragev1(t *testing.T) {
t.Fatalf(err.Error())
}
_, err = GetStoragev1()
// context name is hardcoded by mockK8sConfig
_, err = GetStoragev1("minikube")
if err != nil && !test.err {
t.Fatalf("Unexpected err: %v for test: %v", err, test.description)
}

View File

@ -22,10 +22,12 @@ import (
"net"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"github.com/golang/glog"
"github.com/pkg/errors"
)
func (router *osRouter) EnsureRouteIsAdded(route *Route) error {
@ -37,7 +39,7 @@ func (router *osRouter) EnsureRouteIsAdded(route *Route) error {
return nil
}
if err := writeResolverFile(route); err != nil {
return fmt.Errorf("could not write /etc/resolver/{cluster_domain} file: %s", err)
glog.Errorf("DNS forwarding unavailable: %v", err)
}
serviceCIDR := route.DestCIDR.String()
@ -178,26 +180,48 @@ func (router *osRouter) Cleanup(route *Route) error {
func writeResolverFile(route *Route) error {
resolverFile := "/etc/resolver/" + route.ClusterDomain
content := fmt.Sprintf("nameserver %s\nsearch_order 1\n", route.ClusterDNSIP)
// write resolver content into tmpFile, then copy it to /etc/resolver/clusterDomain
tmpFile, err := ioutil.TempFile("", "minikube-tunnel-resolver-")
glog.Infof("preparing DNS forwarding config in %q:\n%s", resolverFile, content)
// write resolver content into tf, then copy it to /etc/resolver/clusterDomain
tf, err := ioutil.TempFile("", "minikube-tunnel-resolver-")
if err != nil {
return err
return errors.Wrap(err, "tempfile")
}
defer os.Remove(tmpFile.Name())
if _, err = tmpFile.WriteString(content); err != nil {
return err
defer os.Remove(tf.Name())
if _, err = tf.WriteString(content); err != nil {
return errors.Wrap(err, "write")
}
if err = tmpFile.Close(); err != nil {
return err
if err = tf.Close(); err != nil {
return errors.Wrap(err, "close")
}
cmd := exec.Command("sudo", "mkdir", "-p", "/etc/resolver")
if err := cmd.Run(); err != nil {
return err
if err = os.Chmod(tf.Name(), 0644); err != nil {
return errors.Wrap(err, "chmod")
}
cmd = exec.Command("sudo", "cp", "-f", tmpFile.Name(), resolverFile)
if err := cmd.Run(); err != nil {
return err
cmd := exec.Command("sudo", "mkdir", "-p", filepath.Dir(resolverFile))
_, err = cmd.Output()
if err != nil {
if exitErr, ok := err.(*exec.ExitError); ok {
return fmt.Errorf("%q failed: %v: %q", strings.Join(cmd.Args, " "), exitErr, exitErr.Stderr)
}
return errors.Wrap(err, "mkdir")
}
cmd = exec.Command("sudo", "cp", "-fp", tf.Name(), resolverFile)
_, err = cmd.Output()
if err != nil {
if exitErr, ok := err.(*exec.ExitError); ok {
return fmt.Errorf("%q failed: %v: %q", strings.Join(cmd.Args, " "), exitErr, exitErr.Stderr)
}
return errors.Wrap(err, "copy")
}
glog.Infof("DNS forwarding now configured in %q", resolverFile)
return nil
}

View File

@ -177,7 +177,7 @@ func (p *BuildrootProvisioner) Provision(swarmOptions swarm.Options, authOptions
return nil
}
err := retry.Expo(configAuth, time.Second, 2*time.Minute)
err := retry.Expo(configAuth, 100*time.Microsecond, 2*time.Minute)
if err != nil {
glog.Infof("Error configuring auth during provisioning %v", err)
return err

View File

@ -82,7 +82,7 @@ func configureAuth(p miniProvisioner) error {
glog.Infof("configureAuth start")
start := time.Now()
defer func() {
glog.Infof("configureAuth took %s", time.Since(start))
glog.Infof("duration metric: configureAuth took %s", time.Since(start))
}()
driver := p.GetDriver()
@ -292,7 +292,7 @@ func updateUnit(p provision.SSHCommander, name string, content string, dst strin
if _, err := p.SSHCommand(fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | sudo tee %s.new", path.Dir(dst), content, dst)); err != nil {
return err
}
if _, err := p.SSHCommand(fmt.Sprintf("sudo diff -u %s %s.new || { sudo mv %s.new %s; sudo systemctl -f daemon-reload && sudo sudo systemctl -f restart %s; }", dst, dst, dst, dst, name)); err != nil {
if _, err := p.SSHCommand(fmt.Sprintf("sudo diff -u %s %s.new || { sudo mv %s.new %s; sudo systemctl -f daemon-reload && sudo systemctl -f enable %s && sudo systemctl -f restart %s; }", dst, dst, dst, dst, name, name)); err != nil {
return err
}
return nil

View File

@ -180,7 +180,7 @@ func (p *UbuntuProvisioner) Provision(swarmOptions swarm.Options, authOptions au
return nil
}
err := retry.Expo(configAuth, time.Second, 2*time.Minute)
err := retry.Expo(configAuth, 100*time.Microsecond, 2*time.Minute)
if err != nil {
glog.Infof("Error configuring auth during provisioning %v", err)

View File

@ -30,10 +30,15 @@ import (
func TestGenerateCACert(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "")
defer func() { //clean up tempdir
err := os.RemoveAll(tmpDir)
if err != nil {
t.Errorf("failed to clean up temp folder %q", tmpDir)
}
}()
if err != nil {
t.Fatalf("Error generating tmpdir: %v", err)
}
defer os.RemoveAll(tmpDir)
certPath := filepath.Join(tmpDir, "cert")
keyPath := filepath.Join(tmpDir, "key")
@ -58,16 +63,26 @@ func TestGenerateCACert(t *testing.T) {
func TestGenerateSignedCert(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "")
defer func() { //clean up tempdir
err := os.RemoveAll(tmpDir)
if err != nil {
t.Errorf("failed to clean up temp folder %q", tmpDir)
}
}()
if err != nil {
t.Fatalf("Error generating tmpdir: %v", err)
}
defer os.RemoveAll(tmpDir)
signerTmpDir, err := ioutil.TempDir("", "")
defer func() { //clean up tempdir
err := os.RemoveAll(signerTmpDir)
if err != nil {
t.Errorf("failed to clean up temp folder %q", signerTmpDir)
}
}()
if err != nil {
t.Fatalf("Error generating signer tmpdir: %v", err)
}
defer os.RemoveAll(signerTmpDir)
validSignerCertPath := filepath.Join(signerTmpDir, "cert")
validSignerKeyPath := filepath.Join(signerTmpDir, "key")

View File

@ -40,7 +40,7 @@ body, p {
}
h1,h2,h3,h4,h5 {
h1,h2,h3,h4,h5,.navbar-brand {
font-family: 'Open Sans', sans-serif !important;
}

Some files were not shown because too many files have changed in this diff Show More