Merge branch 'master' into podman-sudo

pull/7631/head
Anders F Björklund 2020-04-25 18:05:33 +02:00
commit 6644c5cc64
143 changed files with 3272 additions and 1430 deletions

@@ -1,10 +1,12 @@
name: MasterCI
name: Master
on:
push:
branches:
- master
- master
paths:
- '**.go'
- "**.go"
- "**.yml"
- "Makefile"
env:
GOPROXY: https://proxy.golang.org
jobs:
@@ -13,59 +15,58 @@ jobs:
build_minikube:
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
- name: Download Dependencies
run : go mod download
- name: Build Binaries
run : |
make minikube-linux-amd64
make e2e-linux-amd64
cp -r test/integration/testdata ./out
whoami
echo github ref $GITHUB_REF
echo workflow $GITHUB_WORKFLOW
echo home $HOME
echo event name $GITHUB_EVENT_NAME
echo workspace $GITHUB_WORKSPACE
echo "end of debug stuff"
echo $(which jq)
- uses: actions/upload-artifact@v1
with:
name: minikube_binaries
path: out
- uses: actions/checkout@v2
- name: Download Dependencies
run: go mod download
- name: Build Binaries
run: |
make minikube-linux-amd64
make e2e-linux-amd64
cp -r test/integration/testdata ./out
whoami
echo github ref $GITHUB_REF
echo workflow $GITHUB_WORKFLOW
echo home $HOME
echo event name $GITHUB_EVENT_NAME
echo workspace $GITHUB_WORKSPACE
echo "end of debug stuff"
echo $(which jq)
- uses: actions/upload-artifact@v1
with:
name: minikube_binaries
path: out
lint:
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
- name: Install libvirt
run : |
sudo apt-get update
sudo apt-get install -y libvirt-dev
- name: Download Dependencies
run : go mod download
- name: Lint
env:
TESTSUITE: lintall
run : make test
continue-on-error: false
- uses: actions/checkout@v2
- name: Install libvirt
run: |
sudo apt-get update
sudo apt-get install -y libvirt-dev
- name: Download Dependencies
run: go mod download
- name: Lint
env:
TESTSUITE: lintall
run: make test
continue-on-error: false
unit_test:
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
- name: Install libvirt
run : |
sudo apt-get update
sudo apt-get install -y libvirt-dev
- name: Download Dependencies
run : go mod download
- name: Unit Test
env:
TESTSUITE: unittest
run :
make test
continue-on-error: false
- uses: actions/checkout@v2
- name: Install libvirt
run: |
sudo apt-get update
sudo apt-get install -y libvirt-dev
- name: Download Dependencies
run: go mod download
- name: Unit Test
env:
TESTSUITE: unittest
run: make test
continue-on-error: false
# Run the following integration tests after the build_minikube
# They will run in parallel and use the binaries in previous step
# They will run in parallel and use the binaries in previous step
functional_test_docker_ubuntu:
needs: [build_minikube]
env:
@@ -74,89 +75,89 @@ jobs:
GOPOGH_RESULT: ""
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
runs-on: ubuntu-18.04
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Docker Info
shell: bash
run: |
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: false
# bash {0} to allow test to continue to next step. in case of
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.run TestFunctional -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: functional_test_docker_ubuntu
path: minikube_binaries/report
- name: The End Result functional_test_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Docker Info
shell: bash
run: |
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: false
# bash {0} to allow test to continue to next step. in case of
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.run TestFunctional -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: functional_test_docker_ubuntu
path: minikube_binaries/report
- name: The End Result functional_test_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
addons_certs_tests_docker_ubuntu:
runs-on: ubuntu-18.04
env:
@@ -166,88 +167,88 @@ jobs:
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
needs: [build_minikube]
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Docker Info
shell: bash
run: |
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: true
# bash {0} to allow test to continue to next step. in case of
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestAddons|TestCertOptions)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: addons_certs_tests_docker_ubuntu
path: minikube_binaries/report
- name: The End Result - addons_certs_tests_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Docker Info
shell: bash
run: |
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: true
# bash {0} to allow test to continue to next step. in case of
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestAddons|TestCertOptions)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: addons_certs_tests_docker_ubuntu
path: minikube_binaries/report
- name: The End Result - addons_certs_tests_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
multinode_pause_tests_docker_ubuntu:
runs-on: ubuntu-18.04
env:
@@ -257,88 +258,88 @@ jobs:
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
needs: [build_minikube]
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Docker Info
shell: bash
run: |
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: true
# bash {0} to allow test to continue to next step. in case of
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestPause|TestMultiNode)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: multinode_pause_tests_docker_ubuntu
path: minikube_binaries/report
- name: The End Result - multinode_pause_tests_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Docker Info
shell: bash
run: |
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: true
# bash {0} to allow test to continue to next step. in case of
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestPause|TestMultiNode)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: multinode_pause_tests_docker_ubuntu
path: minikube_binaries/report
- name: The End Result - multinode_pause_tests_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
preload_docker_flags_tests_docker_ubuntu:
runs-on: ubuntu-18.04
env:
@@ -348,88 +349,88 @@ jobs:
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
needs: [build_minikube]
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Docker Info
shell: bash
run: |
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: true
# bash {0} to allow test to continue to next step. in case of
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestPreload|TestDockerFlags)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: preload_docker_flags_tests_docker_ubuntu
path: minikube_binaries/report
- name: The End Result - preload_docker_flags_tests_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Docker Info
shell: bash
run: |
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: true
# bash {0} to allow test to continue to next step. in case of
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestPreload|TestDockerFlags)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: preload_docker_flags_tests_docker_ubuntu
path: minikube_binaries/report
- name: The End Result - preload_docker_flags_tests_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
functional_baremetal_ubuntu18_04:
needs: [build_minikube]
env:
@@ -439,99 +440,106 @@ jobs:
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
runs-on: ubuntu-18.04
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
# conntrack is required for kubernetes 1.18 and higher
# socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon
- name: Install tools for none
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install conntrack
sudo apt-get -qq -y install socat
VERSION="v1.17.0"
curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz
sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: true
# bash {0} to allow test to continue to next step. in case of
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=35m -test.run TestFunctional -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: none_ubuntu18_04
path: minikube_binaries/report
- name: The End Result - None on Ubuntu 18:04
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
# conntrack is required for kubernetes 1.18 and higher
# socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon
- name: Install tools for none
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install conntrack
sudo apt-get -qq -y install socat
VERSION="v1.17.0"
curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz
sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: true
# bash {0} to allow test to continue to next step. in case of
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=35m -test.run TestFunctional -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: none_ubuntu18_04
path: minikube_binaries/report
- name: The End Result - None on Ubuntu 18:04
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
# After all integration tests finished
# collect all the reports and upload them
upload_all_reports:
if: always()
needs: [functional_test_docker_ubuntu, addons_certs_tests_docker_ubuntu, multinode_pause_tests_docker_ubuntu, preload_docker_flags_tests_docker_ubuntu, functional_baremetal_ubuntu18_04]
needs:
[
functional_test_docker_ubuntu,
addons_certs_tests_docker_ubuntu,
multinode_pause_tests_docker_ubuntu,
preload_docker_flags_tests_docker_ubuntu,
functional_baremetal_ubuntu18_04,
]
runs-on: ubuntu-18.04
steps:
- name: download all reports
uses: actions/download-artifact@v2-preview
- name: upload all reports
shell: bash {0}
continue-on-error: true
run: |
mkdir -p all_reports
ls -lah
cp -r ./functional_test_docker_ubuntu ./all_reports/
cp -r ./addons_certs_tests_docker_ubuntu ./all_reports/
cp -r ./multinode_pause_tests_docker_ubuntu ./all_reports/
cp -r ./preload_docker_flags_tests_docker_ubuntu ./all_reports/
cp -r ./functional_baremetal_ubuntu18_04 ./all_reports/
- uses: actions/upload-artifact@v1
with:
name: all_reports
path: all_reports
- name: download all reports
uses: actions/download-artifact@v2-preview
- name: upload all reports
shell: bash {0}
continue-on-error: true
run: |
mkdir -p all_reports
ls -lah
cp -r ./functional_test_docker_ubuntu ./all_reports/
cp -r ./addons_certs_tests_docker_ubuntu ./all_reports/
cp -r ./multinode_pause_tests_docker_ubuntu ./all_reports/
cp -r ./preload_docker_flags_tests_docker_ubuntu ./all_reports/
cp -r ./functional_baremetal_ubuntu18_04 ./all_reports/
- uses: actions/upload-artifact@v1
with:
name: all_reports
path: all_reports
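For reference, the timing and reporting logic that each integration job above repeats across its "Run Integration Test", "Generate HTML Report" and "The End Result" steps reduces to roughly the following bash sketch (an illustration only, assuming gopogh and jq are on PATH and the go test output has been teed to ./report/testout.txt, exactly as in the steps shown in the diff):

START_TIME=$(date -u +%s)
# ... run the e2e binary here, teeing its output to ./report/testout.txt ...
END_TIME=$(date -u +%s)
TIME_ELAPSED=$((END_TIME - START_TIME))
TIME_ELAPSED="$((TIME_ELAPSED / 60)) min $((TIME_ELAPSED % 60)) seconds"
# convert the go test output to JSON and summarize it with gopogh
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
numFail=$(echo "$STAT" | jq '.NumberOfFail')
numPass=$(echo "$STAT" | jq '.NumberOfPass')
echo "${JOB_NAME}: completed with ${numFail} failures (${numPass} passed) in ${TIME_ELAPSED}"
# only this final check fails the job, so the earlier steps can continue past test failures
if [ "$numFail" -gt 0 ]; then exit 2; fi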

@@ -1,8 +1,10 @@
name: CI
name: PR
on:
pull_request:
paths:
- '**.go'
- "**.go"
- "**.yml"
- "Makefile"
env:
GOPROXY: https://proxy.golang.org
jobs:
@@ -11,59 +13,58 @@ jobs:
build_minikube:
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
- name: Download Dependencies
run : go mod download
- name: Build Binaries
run : |
make minikube-linux-amd64
make e2e-linux-amd64
cp -r test/integration/testdata ./out
whoami
echo github ref $GITHUB_REF
echo workflow $GITHUB_WORKFLOW
echo home $HOME
echo event name $GITHUB_EVENT_NAME
echo workspace $GITHUB_WORKSPACE
echo "end of debug stuff"
echo $(which jq)
- uses: actions/upload-artifact@v1
with:
name: minikube_binaries
path: out
- uses: actions/checkout@v2
- name: Download Dependencies
run: go mod download
- name: Build Binaries
run: |
make minikube-linux-amd64
make e2e-linux-amd64
cp -r test/integration/testdata ./out
whoami
echo github ref $GITHUB_REF
echo workflow $GITHUB_WORKFLOW
echo home $HOME
echo event name $GITHUB_EVENT_NAME
echo workspace $GITHUB_WORKSPACE
echo "end of debug stuff"
echo $(which jq)
- uses: actions/upload-artifact@v1
with:
name: minikube_binaries
path: out
lint:
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
- name: Install libvirt
run : |
sudo apt-get update
sudo apt-get install -y libvirt-dev
- name: Download Dependencies
run : go mod download
- name: Lint
env:
TESTSUITE: lintall
run : make test
continue-on-error: false
- uses: actions/checkout@v2
- name: Install libvirt
run: |
sudo apt-get update
sudo apt-get install -y libvirt-dev
- name: Download Dependencies
run: go mod download
- name: Lint
env:
TESTSUITE: lintall
run: make test
continue-on-error: false
unit_test:
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
- name: Install libvirt
run : |
sudo apt-get update
sudo apt-get install -y libvirt-dev
- name: Download Dependencies
run : go mod download
- name: Unit Test
env:
TESTSUITE: unittest
run :
make test
continue-on-error: false
- uses: actions/checkout@v2
- name: Install libvirt
run: |
sudo apt-get update
sudo apt-get install -y libvirt-dev
- name: Download Dependencies
run: go mod download
- name: Unit Test
env:
TESTSUITE: unittest
run: make test
continue-on-error: false
# Run the following integration tests after the build_minikube
# They will run in parallel and use the binaries in previous step
# They will run in parallel and use the binaries in previous step
functional_test_docker_ubuntu:
needs: [build_minikube]
env:
@@ -72,89 +73,89 @@ jobs:
GOPOGH_RESULT: ""
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
runs-on: ubuntu-18.04
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Docker Info
shell: bash
run: |
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: false
# bash {0} to allow test to continue to next step. in case of
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.run TestFunctional -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: functional_test_docker_ubuntu
path: minikube_binaries/report
- name: The End Result functional_test_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Docker Info
shell: bash
run: |
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: false
# bash {0} to allow test to continue to next step. in case of
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.run TestFunctional -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: functional_test_docker_ubuntu
path: minikube_binaries/report
- name: The End Result functional_test_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
addons_certs_tests_docker_ubuntu:
runs-on: ubuntu-18.04
env:
@@ -164,88 +165,88 @@ jobs:
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
needs: [build_minikube]
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Docker Info
shell: bash
run: |
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: true
# bash {0} to allow test to continue to next step. in case of
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestAddons|TestCertOptions)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: addons_certs_tests_docker_ubuntu
path: minikube_binaries/report
- name: The End Result - addons_certs_tests_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Docker Info
shell: bash
run: |
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: true
# bash {0} to allow test to continue to next step. in case of
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestAddons|TestCertOptions)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: addons_certs_tests_docker_ubuntu
path: minikube_binaries/report
- name: The End Result - addons_certs_tests_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
multinode_pause_tests_docker_ubuntu:
runs-on: ubuntu-18.04
env:
@@ -255,88 +256,88 @@ jobs:
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
needs: [build_minikube]
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Docker Info
shell: bash
run: |
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: true
# bash {0} to allow test to continue to next step. in case of
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestPause|TestMultiNode)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: multinode_pause_tests_docker_ubuntu
path: minikube_binaries/report
- name: The End Result - multinode_pause_tests_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Docker Info
shell: bash
run: |
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: true
# bash {0} to allow test to continue to next step. in case of
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestPause|TestMultiNode)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: multinode_pause_tests_docker_ubuntu
path: minikube_binaries/report
- name: The End Result - multinode_pause_tests_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
preload_docker_flags_tests_docker_ubuntu:
runs-on: ubuntu-18.04
env:
@@ -346,88 +347,88 @@ jobs:
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
needs: [build_minikube]
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Docker Info
shell: bash
run: |
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: true
# bash {0} to allow the test to continue to the next step, even in case of failure
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestPreload|TestDockerFlags)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: preload_docker_flags_tests_docker_ubuntu
path: minikube_binaries/report
- name: The End Result - preload_docker_flags_tests_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Docker Info
shell: bash
run: |
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: true
# bash {0} to allow the test to continue to the next step, even in case of failure
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestPreload|TestDockerFlags)" -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: preload_docker_flags_tests_docker_ubuntu
path: minikube_binaries/report
- name: The End Result - preload_docker_flags_tests_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
functional_baremetal_ubuntu18_04:
needs: [build_minikube]
env:
@ -437,99 +438,106 @@ jobs:
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
runs-on: ubuntu-18.04
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
# conntrack is required for kubernetes 1.18 and higher
# socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon
- name: Install tools for none
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install conntrack
sudo apt-get -qq -y install socat
VERSION="v1.17.0"
curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz
sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: true
# bash {0} to allow the test to continue to the next step, even in case of failure
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=35m -test.v -timeout-multiplier=1.5 -test.run TestFunctional -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: none_ubuntu18_04
path: minikube_binaries/report
- name: The End Result - None on Ubuntu 18.04
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
# conntrack is required for kubernetes 1.18 and higher
# socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon
- name: Install tools for none
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install conntrack
sudo apt-get -qq -y install socat
VERSION="v1.17.0"
curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz
sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.19/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: true
# bash {0} to allow the test to continue to the next step, even in case of failure
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=35m -test.v -timeout-multiplier=1.5 -test.run TestFunctional -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
- uses: actions/upload-artifact@v1
with:
name: none_ubuntu18_04
path: minikube_binaries/report
- name: The End Result - None on Ubuntu 18.04
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
# After all integration tests finished
# collect all the reports and upload them
upload_all_reports:
if: always()
needs: [functional_test_docker_ubuntu, addons_certs_tests_docker_ubuntu, multinode_pause_tests_docker_ubuntu, preload_docker_flags_tests_docker_ubuntu, functional_baremetal_ubuntu18_04]
needs:
[
functional_test_docker_ubuntu,
addons_certs_tests_docker_ubuntu,
multinode_pause_tests_docker_ubuntu,
preload_docker_flags_tests_docker_ubuntu,
functional_baremetal_ubuntu18_04,
]
runs-on: ubuntu-18.04
steps:
- name: download all reports
uses: actions/download-artifact@v2-preview
- name: upload all reports
shell: bash {0}
continue-on-error: true
run: |
mkdir -p all_reports
ls -lah
cp -r ./functional_test_docker_ubuntu ./all_reports/
cp -r ./addons_certs_tests_docker_ubuntu ./all_reports/
cp -r ./multinode_pause_tests_docker_ubuntu ./all_reports/
cp -r ./preload_docker_flags_tests_docker_ubuntu ./all_reports/
cp -r ./functional_baremetal_ubuntu18_04 ./all_reports/
- uses: actions/upload-artifact@v1
with:
name: all_reports
path: all_reports
- name: download all reports
uses: actions/download-artifact@v2-preview
- name: upload all reports
shell: bash {0}
continue-on-error: true
run: |
mkdir -p all_reports
ls -lah
cp -r ./functional_test_docker_ubuntu ./all_reports/
cp -r ./addons_certs_tests_docker_ubuntu ./all_reports/
cp -r ./multinode_pause_tests_docker_ubuntu ./all_reports/
cp -r ./preload_docker_flags_tests_docker_ubuntu ./all_reports/
cp -r ./functional_baremetal_ubuntu18_04 ./all_reports/
- uses: actions/upload-artifact@v1
with:
name: all_reports
path: all_reports

View File

@ -1,5 +1,94 @@
# Release Notes
## Version 1.10.0-beta.1 - 2020-04-22
Improvements:
* Skip preload download if --image-repository is set [#7707](https://github.com/kubernetes/minikube/pull/7707)
Bug Fixes:
* ISO: persistently mount /var/lib/containerd [#7843](https://github.com/kubernetes/minikube/pull/7843)
* docker/podman: fix delete -p not cleaning up & add integration test [#7819](https://github.com/kubernetes/minikube/pull/7819)
A huge thank you to our contributors for this release:
- Anders F Björklund
- Kenta Iso
- Medya Ghazizadeh
- Prasad Katti
- Priya Wadhwa
- Sharif Elgamal
- Thomas Stromberg
- Tobias Klauser
## Version 1.10.0-beta.0 - 2020-04-20
Improvements:
* faster containerd start by preloading images [#7793](https://github.com/kubernetes/minikube/pull/7793)
* Add fish completion support [#7777](https://github.com/kubernetes/minikube/pull/7777)
* Behavior change: start with no arguments uses existing cluster config [#7449](https://github.com/kubernetes/minikube/pull/7449)
* conformance: add --wait=all, reduce quirks [#7716](https://github.com/kubernetes/minikube/pull/7716)
* Upgrade minimum supported k8s version to v1.12 [#7723](https://github.com/kubernetes/minikube/pull/7723)
* Add default CNI network for running with podman [#7754](https://github.com/kubernetes/minikube/pull/7754)
* Behavior change: fallback to alternate drivers on failure [#7389](https://github.com/kubernetes/minikube/pull/7389)
* Add registry addon feature for docker on mac/windows [#7603](https://github.com/kubernetes/minikube/pull/7603)
* Check node pressure & new option "node_ready" for --wait flag [#7752](https://github.com/kubernetes/minikube/pull/7752)
* docker driver: Add Service & Tunnel features to windows [#7739](https://github.com/kubernetes/minikube/pull/7739)
* Add master node/worker node type to `minikube status` [#7586](https://github.com/kubernetes/minikube/pull/7586)
* Add new wait component apps_running [#7460](https://github.com/kubernetes/minikube/pull/7460)
* none: Add support for OpenRC init (Google CloudShell) [#7539](https://github.com/kubernetes/minikube/pull/7539)
* Upgrade falco-probe module to version 0.21.0 [#7436](https://github.com/kubernetes/minikube/pull/7436)
Bug Fixes:
* Fix multinode cluster creation for VM drivers [#7700](https://github.com/kubernetes/minikube/pull/7700)
* tunnel: Fix resolver file permissions, add DNS forwarding test [#7753](https://github.com/kubernetes/minikube/pull/7753)
* unconfine apparmor for kic [#7658](https://github.com/kubernetes/minikube/pull/7658)
* Fix `minikube delete` output nodename missing with docker/podman driver [#7553](https://github.com/kubernetes/minikube/pull/7553)
* Respect driver.FlagDefaults even if --extra-config is set [#7509](https://github.com/kubernetes/minikube/pull/7509)
* remove docker/podman overlay network for docker-runtime [#7425](https://github.com/kubernetes/minikube/pull/7425)
A huge thank you to our contributors for this release:
- Alonyb
- Anders F Björklund
- Anshul Sirur
- Balint Pato
- Batuhan Apaydın
- Brad Walker
- Frank Schwichtenberg
- Kenta Iso
- Medya Ghazizadeh
- Michael Vorburger ⛑️
- Pablo Caderno
- Prasad Katti
- Priya Wadhwa
- Radoslaw Smigielski
- Ruben Baez
- Sharif Elgamal
- Thomas Strömberg
- Vikky Omkar
- ZouYu
- gorbondiga
- loftkun
- nestoralonso
- remraz
- sayboras
- tomocy
Thank you so much to users who helped with community triage:
- ps-feng
- Prasad Katti
And a big thank you to those who participated in our docs fixit week:
- matjung
- jlaswell
- remraz
## Version 1.9.2 - 2020-04-04
Minor improvements:

View File

@ -14,8 +14,8 @@
# Bump these on release - and please check ISO_VERSION for correctness.
VERSION_MAJOR ?= 1
VERSION_MINOR ?= 9
VERSION_BUILD ?= 2
VERSION_MINOR ?= 10
VERSION_BUILD ?= 0-beta.1
RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)
VERSION ?= v$(RAW_VERSION)
@ -23,7 +23,7 @@ KUBERNETES_VERSION ?= $(shell egrep "DefaultKubernetesVersion =" pkg/minikube/co
KIC_VERSION ?= $(shell egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f2)
# Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions
ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).0
ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)
# Dashes are valid in semver, but not Linux packaging. Use ~ to delimit alpha/beta
DEB_VERSION ?= $(subst -,~,$(RAW_VERSION))
RPM_VERSION ?= $(DEB_VERSION)
@ -255,7 +255,7 @@ docker-machine-driver-hyperkit: out/docker-machine-driver-hyperkit ## Build Hype
docker-machine-driver-kvm2: out/docker-machine-driver-kvm2 ## Build KVM2 driver
.PHONY: integration
integration: out/minikube ## Trigger minikube integration test
integration: out/minikube$(IS_EXE) ## Trigger minikube integration test
go test -v -test.timeout=60m ./test/integration --tags="$(MINIKUBE_INTEGRATION_BUILD_TAGS)" $(TEST_ARGS)
.PHONY: integration-none-driver
@ -276,7 +276,7 @@ generate-docs: out/minikube ## Automatically generate commands documentation.
.PHONY: gotest
gotest: $(SOURCE_GENERATED) ## Trigger minikube test
go test -tags "$(MINIKUBE_BUILD_TAGS)" $(MINIKUBE_TEST_FILES)
go test -tags "$(MINIKUBE_BUILD_TAGS)" -ldflags="$(MINIKUBE_LDFLAGS)" $(MINIKUBE_TEST_FILES)
.PHONY: extract
extract: ## Compile extract tool
@ -397,6 +397,10 @@ reportcard: ## Run goreportcard for minikube
mdlint:
@$(MARKDOWNLINT) $(MINIKUBE_MARKDOWN_FILES)
.PHONY: verify-iso
verify-iso: # Make sure the current ISO exists in the expected bucket
gsutil stat gs://$(ISO_BUCKET)/minikube-$(ISO_VERSION).iso
out/docs/minikube.md: $(shell find "cmd") $(shell find "pkg/minikube/constants") pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go
go run -ldflags="$(MINIKUBE_LDFLAGS)" -tags gendocs hack/help_text/gen_help_text.go

View File

@ -28,7 +28,7 @@ import (
)
const longDescription = `
Outputs minikube shell completion for the given shell (bash or zsh)
Outputs minikube shell completion for the given shell (bash, zsh or fish)
This depends on the bash-completion binary. Example installation instructions:
OS X:
@ -37,15 +37,18 @@ const longDescription = `
$ minikube completion bash > ~/.minikube-completion # for bash users
$ minikube completion zsh > ~/.minikube-completion # for zsh users
$ source ~/.minikube-completion
$ minikube completion fish > ~/.config/fish/completions/minikube.fish # for fish users
Ubuntu:
$ apt-get install bash-completion
$ source /etc/bash-completion
$ source <(minikube completion bash) # for bash users
$ source <(minikube completion zsh) # for zsh users
$ minikube completion fish > ~/.config/fish/completions/minikube.fish # for fish users
Additionally, you may want to output the completion to a file and source it in your .bashrc
Note for zsh users: [1] zsh completions are only supported in versions of zsh >= 5.2
Note for fish users: [2] please refer to these docs for more details: https://fishshell.com/docs/current/#tab-completion
`
const boilerPlate = `
@ -66,24 +69,29 @@ const boilerPlate = `
var completionCmd = &cobra.Command{
Use: "completion SHELL",
Short: "Outputs minikube shell completion for the given shell (bash or zsh)",
Short: "Outputs minikube shell completion for the given shell (bash, zsh or fish)",
Long: longDescription,
Run: func(cmd *cobra.Command, args []string) {
if len(args) != 1 {
exit.UsageT("Usage: minikube completion SHELL")
}
if args[0] != "bash" && args[0] != "zsh" {
if args[0] != "bash" && args[0] != "zsh" && args[0] != "fish" {
exit.UsageT("Sorry, completion support is not yet implemented for {{.name}}", out.V{"name": args[0]})
} else if args[0] == "bash" {
err := GenerateBashCompletion(os.Stdout, cmd.Parent())
if err != nil {
exit.WithError("bash completion failed", err)
}
} else {
} else if args[0] == "zsh" {
err := GenerateZshCompletion(os.Stdout, cmd.Parent())
if err != nil {
exit.WithError("zsh completion failed", err)
}
} else {
err := GenerateFishCompletion(os.Stdout, cmd.Parent())
if err != nil {
exit.WithError("fish completion failed", err)
}
}
},
@ -279,3 +287,18 @@ __minikube_bash_source <(__minikube_convert_bash_to_zsh)
return nil
}
// GenerateFishCompletion generates the completion for the fish shell
func GenerateFishCompletion(w io.Writer, cmd *cobra.Command) error {
_, err := w.Write([]byte(boilerPlate))
if err != nil {
return err
}
err = cmd.GenFishCompletion(w, true)
if err != nil {
return errors.Wrap(err, "Error generating fish completion")
}
return nil
}
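As a rough illustration, the new generator leans on cobra's GenFishCompletion, which is why go.mod below moves to spf13/cobra v1.0.0. A minimal standalone sketch of that call (the root command here is a placeholder, not minikube's real command tree):

package main

import (
	"log"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	// Placeholder root command; minikube passes its real root *cobra.Command instead.
	root := &cobra.Command{Use: "minikube"}

	// GenFishCompletion is available from cobra v1.0.0; the boolean enables descriptions.
	if err := root.GenFishCompletion(os.Stdout, true); err != nil {
		log.Fatalf("fish completion failed: %v", err)
	}
}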

View File

@ -18,8 +18,10 @@ package config
import (
"io/ioutil"
"net"
"github.com/spf13/cobra"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/service"
@ -184,6 +186,30 @@ var addonsConfigureCmd = &cobra.Command{
out.WarningT("ERROR creating `registry-creds-acr` secret")
}
case "metallb":
profile := ClusterFlagValue()
cfg, err := config.Load(profile)
if err != nil {
out.ErrT(out.FatalType, "Failed to load config {{.profile}}", out.V{"profile": profile})
}
validator := func(s string) bool {
return net.ParseIP(s) != nil
}
if cfg.KubernetesConfig.LoadBalancerStartIP == "" {
cfg.KubernetesConfig.LoadBalancerStartIP = AskForStaticValidatedValue("-- Enter Load Balancer Start IP: ", validator)
}
if cfg.KubernetesConfig.LoadBalancerEndIP == "" {
cfg.KubernetesConfig.LoadBalancerEndIP = AskForStaticValidatedValue("-- Enter Load Balancer End IP: ", validator)
}
err = config.SaveProfile(profile, cfg)
if err != nil {
out.ErrT(out.FatalType, "Failed to save config {{.profile}}", out.V{"profile": profile})
}
default:
out.FailureT("{{.name}} has no available configuration options", out.V{"name": addon})
return

View File

@ -153,3 +153,23 @@ func posString(slice []string, element string) int {
func containsString(slice []string, element string) bool {
return posString(slice, element) != -1
}
// AskForStaticValidatedValue asks for a single value to enter and check for valid input
func AskForStaticValidatedValue(s string, validator func(s string) bool) string {
reader := bufio.NewReader(os.Stdin)
for {
response := getStaticValue(reader, s)
// Can't have zero length
if len(response) == 0 {
out.Err("--Error, please enter a value:")
continue
}
if !validator(response) {
out.Err("--Invalid input, please enter a value:")
continue
}
return response
}
}
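A small self-contained sketch of how a validator plugs into this prompt loop, mirroring the metallb configure flow above (the helper name and prompt text are illustrative stand-ins, not the exported API):

package main

import (
	"bufio"
	"fmt"
	"net"
	"os"
	"strings"
)

// askValidated re-prompts until the validator accepts a non-empty answer,
// the same loop shape as AskForStaticValidatedValue above.
func askValidated(prompt string, valid func(string) bool) string {
	r := bufio.NewReader(os.Stdin)
	for {
		fmt.Print(prompt)
		line, _ := r.ReadString('\n')
		line = strings.TrimSpace(line)
		if len(line) == 0 || !valid(line) {
			fmt.Println("-- Invalid input, please enter a value:")
			continue
		}
		return line
	}
}

func main() {
	isIP := func(s string) bool { return net.ParseIP(s) != nil }
	start := askValidated("-- Enter Load Balancer Start IP: ", isIP)
	fmt.Println("start IP:", start)
}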

View File

@ -147,6 +147,8 @@ func runDelete(cmd *cobra.Command, args []string) {
out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": cname})
}
deletePossibleKicLeftOver(cname)
errs := DeleteProfiles([]*config.Profile{profile})
if len(errs) > 0 {
HandleDeletionErrors(errs)
@ -189,20 +191,32 @@ func DeleteProfiles(profiles []*config.Profile) []error {
return errs
}
func deleteProfileContainersAndVolumes(name string) {
func deletePossibleKicLeftOver(name string) {
delLabel := fmt.Sprintf("%s=%s", oci.ProfileLabelKey, name)
errs := oci.DeleteContainersByLabel("env", oci.Docker, delLabel)
if errs != nil { // it will error if there is no container to delete
glog.Infof("error deleting containers for %s (might be okay):\n%v", name, errs)
}
errs = oci.DeleteAllVolumesByLabel("env", oci.Docker, delLabel)
if errs != nil { // it will not error if there is nothing to delete
glog.Warningf("error deleting volumes (might be okay).\nTo see the list of volumes run: 'docker volume ls'\n:%v", errs)
}
prefixes := []string{"env", "sudo"}
for i, bin := range []string{oci.Docker, oci.Podman} {
prefix := prefixes[i]
cs, err := oci.ListContainersByLabel(prefix, bin, delLabel)
if err == nil && len(cs) > 0 {
for _, c := range cs {
out.T(out.DeletingHost, `Deleting container "{{.name}}" ...`, out.V{"name": name})
err := oci.DeleteContainer(prefix, bin, c)
if err != nil { // it will error if there is no container to delete
glog.Errorf("error deleting container %q. you might want to delete that manually :\n%v", name, err)
}
errs = oci.PruneAllVolumesByLabel("env", oci.Docker, delLabel)
if len(errs) > 0 { // it will not error if there is nothing to delete
glog.Warningf("error pruning volume (might be okay):\n%v", errs)
}
}
errs := oci.DeleteAllVolumesByLabel(prefix, bin, delLabel)
if errs != nil { // it will not error if there is nothing to delete
glog.Warningf("error deleting volumes (might be okay).\nTo see the list of volumes run: 'docker volume ls'\n:%v", errs)
}
errs = oci.PruneAllVolumesByLabel(prefix, bin, delLabel)
if len(errs) > 0 { // it will not error if there is nothing to delete
glog.Warningf("error pruning volume (might be okay):\n%v", errs)
}
}
}
@ -212,7 +226,7 @@ func deleteProfile(profile *config.Profile) error {
// if driver is oci driver, delete containers and volumes
if driver.IsKIC(profile.Config.Driver) {
out.T(out.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": profile.Name, "driver_name": profile.Config.Driver})
deleteProfileContainersAndVolumes(profile.Name)
deletePossibleKicLeftOver(profile.Name)
}
}

View File

@ -75,7 +75,6 @@ var nodeStartCmd = &cobra.Command{
}
func init() {
nodeStartCmd.Flags().String("name", "", "The name of the node to start")
nodeStartCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
nodeCmd.AddCommand(nodeStartCmd)
}

View File

@ -53,6 +53,5 @@ var nodeStopCmd = &cobra.Command{
}
func init() {
nodeStopCmd.Flags().String("name", "", "The name of the node to delete")
nodeCmd.AddCommand(nodeStopCmd)
}

View File

@ -80,11 +80,6 @@ var serviceCmd = &cobra.Command{
cname := ClusterFlagValue()
co := mustload.Healthy(cname)
if driver.NeedsPortForward(co.Config.Driver) {
startKicServiceTunnel(svc, cname)
return
}
urls, err := service.WaitForService(co.API, co.Config.Name, namespace, svc, serviceURLTemplate, serviceURLMode, https, wait, interval)
if err != nil {
var s *service.SVCNotFoundError
@ -95,6 +90,11 @@ You may select another namespace by using 'minikube service {{.service}} -n <nam
exit.WithError("Error opening service", err)
}
if driver.NeedsPortForward(co.Config.Driver) {
startKicServiceTunnel(svc, cname)
return
}
openURLs(svc, urls)
},
}

View File

@ -60,7 +60,6 @@ import (
"k8s.io/minikube/pkg/minikube/registry"
"k8s.io/minikube/pkg/minikube/translate"
"k8s.io/minikube/pkg/util"
pkgutil "k8s.io/minikube/pkg/util"
"k8s.io/minikube/pkg/version"
)
@ -751,7 +750,7 @@ func suggestMemoryAllocation(sysLimit int, containerLimit int) int {
// validateMemorySize validates the memory size matches the minimum recommended
func validateMemorySize() {
req, err := pkgutil.CalculateSizeInMB(viper.GetString(memory))
req, err := util.CalculateSizeInMB(viper.GetString(memory))
if err != nil {
exit.WithCodeT(exit.Config, "Unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
}
@ -787,7 +786,7 @@ func validateCPUCount(local bool) {
// validateFlags validates the supplied flags against known bad combinations
func validateFlags(cmd *cobra.Command, drvName string) {
if cmd.Flags().Changed(humanReadableDiskSize) {
diskSizeMB, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
diskSizeMB, err := util.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
if err != nil {
exit.WithCodeT(exit.Config, "Validation unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err})
}
@ -997,7 +996,7 @@ func getKubernetesVersion(old *config.ClusterConfig) string {
}
if defaultVersion.GT(nvs) {
out.T(out.ThumbsUp, "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}", out.V{"new": defaultVersion})
out.T(out.New, "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}", out.V{"new": defaultVersion})
}
return nv
}

View File

@ -26,6 +26,7 @@ import (
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/drivers/kic"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
"k8s.io/minikube/pkg/minikube/config"
@ -98,6 +99,7 @@ const (
nodes = "nodes"
preload = "preload"
deleteOnFailure = "delete-on-failure"
kicBaseImage = "base-image"
)
// initMinikubeFlags includes commandline flags for minikube.
@ -118,6 +120,7 @@ func initMinikubeFlags() {
startCmd.Flags().Bool(downloadOnly, false, "If true, only download and cache files for later use - don't install or start anything.")
startCmd.Flags().Bool(cacheImages, true, "If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none.")
startCmd.Flags().StringSlice(isoURL, download.DefaultISOURLs(), "Locations to fetch the minikube ISO from.")
startCmd.Flags().String(kicBaseImage, kic.BaseImage, "The base image to use for docker/podman drivers. Intended for local development.")
startCmd.Flags().Bool(keepContext, false, "This will keep the existing kubectl context and will create a minikube context.")
startCmd.Flags().Bool(embedCerts, false, "if true, will embed the certs in kubeconfig.")
startCmd.Flags().String(containerRuntime, "docker", "The container runtime to be used (docker, crio, containerd).")
@ -255,7 +258,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
if strings.ToLower(repository) == "auto" || mirrorCountry != "" {
found, autoSelectedRepository, err := selectImageRepository(mirrorCountry, semver.MustParse(strings.TrimPrefix(k8sVersion, version.VersionPrefix)))
if err != nil {
exit.WithError("Failed to check main repository and mirrors for images for images", err)
exit.WithError("Failed to check main repository and mirrors for images", err)
}
if !found {
@ -269,7 +272,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
repository = autoSelectedRepository
}
if cmd.Flags().Changed(imageRepository) {
if cmd.Flags().Changed(imageRepository) || cmd.Flags().Changed(imageMirrorCountry) {
out.T(out.SuccessType, "Using image repository {{.name}}", out.V{"name": repository})
}

View File

@ -104,12 +104,11 @@ var statusCmd = &cobra.Command{
cname := ClusterFlagValue()
api, cc := mustload.Partial(cname)
var st *Status
var err error
var statuses []*Status
for _, n := range cc.Nodes {
glog.Infof("checking status of %s ...", n.Name)
machineName := driver.MachineName(*cc, n)
st, err = status(api, *cc, n)
st, err := status(api, *cc, n)
glog.Infof("%s status: %+v", machineName, st)
if err != nil {
@ -118,36 +117,40 @@ var statusCmd = &cobra.Command{
if st.Host == Nonexistent {
glog.Errorf("The %q host does not exist!", machineName)
}
statuses = append(statuses, st)
}
switch strings.ToLower(output) {
case "text":
switch strings.ToLower(output) {
case "text":
for _, st := range statuses {
if err := statusText(st, os.Stdout); err != nil {
exit.WithError("status text failure", err)
}
case "json":
if err := statusJSON(st, os.Stdout); err != nil {
exit.WithError("status json failure", err)
}
default:
exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output))
}
case "json":
if err := statusJSON(statuses, os.Stdout); err != nil {
exit.WithError("status json failure", err)
}
default:
exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output))
}
// TODO: Update for multi-node
os.Exit(exitCode(st))
os.Exit(exitCode(statuses))
},
}
func exitCode(st *Status) int {
func exitCode(statuses []*Status) int {
c := 0
if st.Host != state.Running.String() {
c |= minikubeNotRunningStatusFlag
}
if (st.APIServer != state.Running.String() && st.APIServer != Irrelevant) || st.Kubelet != state.Running.String() {
c |= clusterNotRunningStatusFlag
}
if st.Kubeconfig != Configured && st.Kubeconfig != Irrelevant {
c |= k8sNotRunningStatusFlag
for _, st := range statuses {
if st.Host != state.Running.String() {
c |= minikubeNotRunningStatusFlag
}
if (st.APIServer != state.Running.String() && st.APIServer != Irrelevant) || st.Kubelet != state.Running.String() {
c |= clusterNotRunningStatusFlag
}
if st.Kubeconfig != Configured && st.Kubeconfig != Irrelevant {
c |= k8sNotRunningStatusFlag
}
}
return c
}
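To make the bitmask behaviour concrete, a small standalone sketch of the same idea; the flag values and field names below are assumptions for illustration (the real constants live elsewhere in this package):

package main

import "fmt"

// Assumed bit values; the real flags are defined elsewhere in the status package.
const (
	minikubeNotRunning = 1 << 0
	clusterNotRunning  = 1 << 1
	k8sNotConfigured   = 1 << 2
)

type nodeStatus struct{ hostRunning, apiRunning, kubeconfigOK bool }

// exitCode ORs a bit into the result for every problem found on any node,
// mirroring the multi-node loop above.
func exitCode(statuses []nodeStatus) int {
	c := 0
	for _, st := range statuses {
		if !st.hostRunning {
			c |= minikubeNotRunning
		}
		if !st.apiRunning {
			c |= clusterNotRunning
		}
		if !st.kubeconfigOK {
			c |= k8sNotConfigured
		}
	}
	return c
}

func main() {
	// Healthy control plane plus a stopped worker: host and cluster bits are set, so this prints 3.
	fmt.Println(exitCode([]nodeStatus{
		{hostRunning: true, apiRunning: true, kubeconfigOK: true},
		{hostRunning: false, apiRunning: false, kubeconfigOK: true},
	}))
}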
@ -270,8 +273,15 @@ func statusText(st *Status, w io.Writer) error {
return nil
}
func statusJSON(st *Status, w io.Writer) error {
js, err := json.Marshal(st)
func statusJSON(st []*Status, w io.Writer) error {
var js []byte
var err error
// Keep backwards compat with single node clusters to not break anyone
if len(st) == 1 {
js, err = json.Marshal(st[0])
} else {
js, err = json.Marshal(st)
}
if err != nil {
return err
}

View File

@ -35,7 +35,7 @@ func TestExitCode(t *testing.T) {
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
got := exitCode(tc.state)
got := exitCode([]*Status{tc.state})
if got != tc.want {
t.Errorf("exitcode(%+v) = %d, want: %d", tc.state, got, tc.want)
}
@ -93,7 +93,7 @@ func TestStatusJSON(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
var b bytes.Buffer
err := statusJSON(tc.state, &b)
err := statusJSON([]*Status{tc.state}, &b)
if err != nil {
t.Errorf("json(%+v) error: %v", tc.state, err)
}

View File

@ -90,7 +90,7 @@ spec:
containers:
- name: kubernetes-dashboard
# WARNING: This must match pkg/minikube/bootstrapper/images/images.go
image: kubernetesui/dashboard:v2.0.0-rc6
image: kubernetesui/dashboard:v2.0.0
ports:
- containerPort: 9090
protocol: TCP

View File

@ -0,0 +1,12 @@
apiVersion: v1
kind: ConfigMap
metadata:
namespace: metallb-system
name: config
data:
config: |
address-pools:
- name: default
protocol: layer2
addresses:
- {{ .LoadBalancerStartIP }}-{{ .LoadBalancerEndIP }}
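The start/end placeholders above are filled in from the cluster config (see the LoadBalancerStartIP/LoadBalancerEndIP prompts in the metallb configure code earlier). A minimal sketch of that templating step, with an assumed struct whose field names simply match the placeholders:

package main

import (
	"os"
	"text/template"
)

// cfg mirrors the two placeholders used in the ConfigMap above; the real values
// come from the cluster profile's KubernetesConfig.
type cfg struct {
	LoadBalancerStartIP string
	LoadBalancerEndIP   string
}

const addressPool = `addresses:
      - {{ .LoadBalancerStartIP }}-{{ .LoadBalancerEndIP }}
`

func main() {
	t := template.Must(template.New("metallb").Parse(addressPool))
	// Prints the rendered pool, e.g. "- 192.168.49.100-192.168.49.120".
	_ = t.Execute(os.Stdout, cfg{
		LoadBalancerStartIP: "192.168.49.100",
		LoadBalancerEndIP:   "192.168.49.120",
	})
}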

View File

@ -0,0 +1,293 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app: metallb
name: metallb-system
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
labels:
app: metallb
name: speaker
namespace: metallb-system
spec:
allowPrivilegeEscalation: false
allowedCapabilities:
- NET_ADMIN
- NET_RAW
- SYS_ADMIN
fsGroup:
rule: RunAsAny
hostNetwork: true
hostPorts:
- max: 7472
min: 7472
privileged: true
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- '*'
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: metallb
name: controller
namespace: metallb-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: metallb
name: speaker
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: metallb
name: metallb-system:controller
rules:
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- update
- apiGroups:
- ''
resources:
- services/status
verbs:
- update
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: metallb
name: metallb-system:speaker
rules:
- apiGroups:
- ''
resources:
- services
- endpoints
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- extensions
resourceNames:
- speaker
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app: metallb
name: config-watcher
namespace: metallb-system
rules:
- apiGroups:
- ''
resources:
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: metallb
name: metallb-system:controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-system:controller
subjects:
- kind: ServiceAccount
name: controller
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: metallb
name: metallb-system:speaker
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-system:speaker
subjects:
- kind: ServiceAccount
name: speaker
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: metallb
name: config-watcher
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: config-watcher
subjects:
- kind: ServiceAccount
name: controller
- kind: ServiceAccount
name: speaker
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app: metallb
component: speaker
name: speaker
namespace: metallb-system
spec:
selector:
matchLabels:
app: metallb
component: speaker
template:
metadata:
annotations:
prometheus.io/port: '7472'
prometheus.io/scrape: 'true'
labels:
app: metallb
component: speaker
spec:
containers:
- args:
- --port=7472
- --config=config
env:
- name: METALLB_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: METALLB_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
image: metallb/speaker:v0.8.2
imagePullPolicy: IfNotPresent
name: speaker
ports:
- containerPort: 7472
name: monitoring
resources:
limits:
cpu: 100m
memory: 100Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_ADMIN
- NET_RAW
- SYS_ADMIN
drop:
- ALL
readOnlyRootFilesystem: true
hostNetwork: true
nodeSelector:
beta.kubernetes.io/os: linux
serviceAccountName: speaker
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: metallb
component: controller
name: controller
namespace: metallb-system
spec:
revisionHistoryLimit: 3
selector:
matchLabels:
app: metallb
component: controller
template:
metadata:
annotations:
prometheus.io/port: '7472'
prometheus.io/scrape: 'true'
labels:
app: metallb
component: controller
spec:
containers:
- args:
- --port=7472
- --config=config
image: metallb/controller:v0.8.2
imagePullPolicy: IfNotPresent
name: controller
ports:
- containerPort: 7472
name: monitoring
resources:
limits:
cpu: 100m
memory: 100Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- all
readOnlyRootFilesystem: true
nodeSelector:
beta.kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: controller
terminationGracePeriodSeconds: 0

View File

@ -101,6 +101,10 @@ if [ -n "$BOOT2DOCKER_DATA" ]; then
mkdir -p /var/lib/docker
mount --bind /mnt/$PARTNAME/var/lib/docker /var/lib/docker
mkdir -p /mnt/$PARTNAME/var/lib/containerd
mkdir -p /var/lib/containerd
mount --bind /mnt/$PARTNAME/var/lib/containerd /var/lib/containerd
mkdir -p /mnt/$PARTNAME/var/lib/containers
mkdir -p /var/lib/containers
mount --bind /mnt/$PARTNAME/var/lib/containers /var/lib/containers
@ -109,6 +113,10 @@ if [ -n "$BOOT2DOCKER_DATA" ]; then
mkdir /var/log
mount --bind /mnt/$PARTNAME/var/log /var/log
mkdir -p /mnt/$PARTNAME/var/tmp
mkdir /var/tmp
mount --bind /mnt/$PARTNAME/var/tmp /var/tmp
mkdir -p /mnt/$PARTNAME/var/lib/kubelet
mkdir /var/lib/kubelet
mount --bind /mnt/$PARTNAME/var/lib/kubelet /var/lib/kubelet

View File

@ -29,6 +29,8 @@ endef
define PODMAN_INSTALL_TARGET_CMDS
$(INSTALL) -Dm755 $(@D)/bin/podman $(TARGET_DIR)/usr/bin/podman
$(INSTALL) -d -m 755 $(TARGET_DIR)/etc/cni/net.d/
$(INSTALL) -m 644 $(@D)/cni/87-podman-bridge.conflist $(TARGET_DIR)/etc/cni/net.d/87-podman-bridge.conflist
endef
$(eval $(generic-package))

go.mod
View File

@ -70,7 +70,7 @@ require (
github.com/samalba/dockerclient v0.0.0-20160414174713-91d7393ff859 // indirect
github.com/shirou/gopsutil v2.18.12+incompatible
github.com/spf13/cast v1.3.1 // indirect
github.com/spf13/cobra v0.0.5
github.com/spf13/cobra v1.0.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.6.1
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect

go.sum
View File

@ -156,6 +156,8 @@ github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfc
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
@ -706,6 +708,8 @@ github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNue
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday v1.5.3-0.20200218234912-41c5fccfd6f6 h1:tlXG832s5pa9x9Gs3Rp2rTvEqjiDEuETUOSfBEiTcns=
github.com/russross/blackfriday v1.5.3-0.20200218234912-41c5fccfd6f6/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sayboras/dockerclient v0.0.0-20191231050035-015626177a97 h1:DWY4yZN6w+FSKMeqBBXaalT8zmCn4DVwBGopShnlwFE=
@ -722,6 +726,8 @@ github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 h1:udFKJ0aHUL60LboW/A+D
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@ -750,6 +756,8 @@ github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
@ -764,6 +772,7 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.6.1 h1:VPZzIkznI1YhVMRi6vNFLHSwhnhReBfgTxIPccpfdZk=
github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY=

View File

@ -27,7 +27,7 @@ set -e
OS_ARCH="linux-amd64"
VM_DRIVER="podman"
JOB_NAME="Podman_Linux"
JOB_NAME="Experimental_Podman_Linux"
mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP"

View File

@ -38,6 +38,10 @@ grep -E "^VERSION_BUILD \\?=" Makefile | grep "${VERSION_BUILD}"
# Force go packages to the Jenkins home directory
export GOPATH=$HOME/go
# Verify ISO exists
echo "Verifying ISO exists ..."
make verify-iso
# Build and upload
env BUILD_IN_DOCKER=y \
make -j 16 \

View File

@ -75,34 +75,13 @@ github-release -v release ${RELEASE_FLAGS} \
--name "${TAGNAME}" \
--description "${DESCRIPTION}"
# Upload the files to GitHub
FILES_TO_UPLOAD=(
'minikube-linux-amd64'
'minikube-linux-amd64.sha256'
'minikube-darwin-amd64'
'minikube-darwin-amd64.sha256'
'minikube-windows-amd64.exe'
'minikube-windows-amd64.exe.sha256'
'minikube-installer.exe'
"minikube_${DEB_VERSION}-0_amd64.deb"
"minikube-${RPM_VERSION}-0.x86_64.rpm"
'docker-machine-driver-kvm2'
'docker-machine-driver-kvm2.sha256'
'docker-machine-driver-hyperkit'
'docker-machine-driver-hyperkit.sha256'
)
# ISO files are special, as they are generated pre-release tagging
ISO_FILES=("minikube-v${VERSION}.iso" "minikube-v${VERSION}.iso.sha256")
for DOWNLOAD in "${ISO_FILES[@]}"
do
gsutil cp "gs://${ISO_BUCKET}/${DOWNLOAD}" out/ \
&& FILES_TO_UPLOAD+=("${DOWNLOAD}") \
|| echo "${DOWNLOAD} was not generated for this release"
# ISO files are built from a separate process, and may not be included in this release
for path in $(gsutil ls "gs://${ISO_BUCKET}/minikube-v${VERSION}*" || true); do
gsutil cp "${path}" out/
done
for UPLOAD in "${FILES_TO_UPLOAD[@]}"
do
# Upload all end-user assets other than preload files, as they are release independent
for file in out/minikube[_-]* out/docker-machine-*; do
n=0
until [ $n -ge 5 ]
do
@ -110,8 +89,8 @@ do
--user "${GITHUB_ORGANIZATION}" \
--repo "${GITHUB_REPO}" \
--tag "${TAGNAME}" \
--name "$UPLOAD" \
--file "out/$UPLOAD" && break
--name "$(basename ${file})" \
--file "${file}" && break
n=$((n+1))
sleep 15
done

View File

@ -21,6 +21,7 @@ import (
"os"
"os/exec"
"path/filepath"
"time"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/drivers/kic"
@ -29,21 +30,17 @@ import (
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/sysinit"
"k8s.io/minikube/pkg/util"
"k8s.io/minikube/pkg/util/retry"
)
func generateTarball(kubernetesVersion, containerRuntime, tarballFilename string) error {
defer func() {
if err := deleteMinikube(); err != nil {
fmt.Println(err)
}
}()
driver := kic.NewDriver(kic.Config{
KubernetesVersion: kubernetesVersion,
ContainerRuntime: driver.Docker,
ContainerRuntime: containerRuntime,
OCIPrefix: "env",
OCIBinary: oci.Docker,
MachineName: profile,
@ -69,44 +66,94 @@ func generateTarball(kubernetesVersion, containerRuntime, tarballFilename string
if err != nil {
return errors.Wrap(err, "kubeadm images")
}
if containerRuntime != "docker" { // kic overlay image is only needed by containerd and cri-o https://github.com/kubernetes/minikube/issues/7428
imgs = append(imgs, kic.OverlayImage)
}
runner := command.NewKICRunner(profile, driver.OCIPrefix, driver.OCIBinary)
// will need to do this to enable the container run-time service
sv, err := util.ParseKubernetesVersion(kubernetesVersion)
if err != nil {
return errors.Wrap(err, "Failed to parse kubernetes version")
}
co := cruntime.Config{
Type: containerRuntime,
Runner: runner,
ImageRepository: "",
KubernetesVersion: sv, // this is just to satisfy cruntime; the exact version shouldn't matter.
}
cr, err := cruntime.New(co)
if err != nil {
return errors.Wrap(err, "failed create new runtime")
}
if err := cr.Enable(true); err != nil {
return errors.Wrap(err, "enable container runtime")
}
for _, img := range imgs {
cmd := exec.Command("docker", "exec", profile, "docker", "pull", img)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return errors.Wrapf(err, "downloading %s", img)
pull := func() error {
cmd := imagePullCommand(containerRuntime, img)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
time.Sleep(time.Second) // to avoid error: exec: already started
return errors.Wrapf(err, "pulling image %s", img)
}
return nil
}
// retry up to 5 times if network is bad
if err = retry.Expo(pull, time.Microsecond, time.Minute, 5); err != nil {
return errors.Wrapf(err, "pull image %s", img)
}
}
// Transfer in k8s binaries
kcfg := config.KubernetesConfig{
KubernetesVersion: kubernetesVersion,
}
runner := command.NewKICRunner(profile, driver.OCIPrefix, driver.OCIBinary)
sm := sysinit.New(runner)
if err := bsutil.TransferBinaries(kcfg, runner, sm); err != nil {
return errors.Wrap(err, "transferring k8s binaries")
}
// Create image tarball
if err := createImageTarball(tarballFilename); err != nil {
if err := createImageTarball(tarballFilename, containerRuntime); err != nil {
return errors.Wrap(err, "create tarball")
}
return copyTarballToHost(tarballFilename)
}
func createImageTarball(tarballFilename string) error {
// returns the right command to pull image for a specific runtime
func imagePullCommand(containerRuntime, img string) *exec.Cmd {
if containerRuntime == "docker" {
return exec.Command("docker", "exec", profile, "docker", "pull", img)
}
if containerRuntime == "containerd" {
return exec.Command("docker", "exec", profile, "sudo", "crictl", "pull", img)
}
return nil
}
func createImageTarball(tarballFilename, containerRuntime string) error {
// directories to save into tarball
dirs := []string{
fmt.Sprintf("./lib/docker/%s", dockerStorageDriver),
"./lib/docker/image",
"./lib/minikube/binaries",
}
if containerRuntime == "docker" {
dirs = append(dirs, fmt.Sprintf("./lib/docker/%s", dockerStorageDriver), "./lib/docker/image")
}
if containerRuntime == "containerd" {
dirs = append(dirs, "./lib/containerd")
}
args := []string{"exec", profile, "sudo", "tar", "-I", "lz4", "-C", "/var", "-cvf", tarballFilename}
args = append(args, dirs...)
cmd := exec.Command("docker", args...)
@ -128,7 +175,7 @@ func copyTarballToHost(tarballFilename string) error {
}
func deleteMinikube() error {
cmd := exec.Command(minikubePath, "delete", "-p", profile)
cmd := exec.Command(minikubePath, "delete", "-p", profile) // to avoid https://github.com/kubernetes/minikube/issues/7814
cmd.Stdout = os.Stdout
return cmd.Run()
}

View File

@ -20,12 +20,13 @@ import (
"bytes"
"flag"
"fmt"
"os"
"os/exec"
"runtime/debug"
"strings"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/download"
"k8s.io/minikube/pkg/minikube/exit"
)
const (
@ -35,7 +36,7 @@ const (
var (
dockerStorageDriver = "overlay2"
containerRuntimes = []string{"docker"}
containerRuntimes = []string{"docker", "containerd"}
k8sVersion string
k8sVersions []string
)
@ -50,14 +51,24 @@ func init() {
}
func main() {
defer func() {
if err := deleteMinikube(); err != nil {
fmt.Printf("error cleaning up minikube: %v \n", err)
}
}()
if err := deleteMinikube(); err != nil {
fmt.Printf("error cleaning up minikube at start up: %v \n", err)
}
if err := verifyDockerStorage(); err != nil {
exit.WithError("Docker storage type is incompatible: %v\n", err)
exit("Docker storage type is incompatible: %v \n", err)
}
if k8sVersions == nil {
var err error
k8sVersions, err = RecentK8sVersions()
if err != nil {
exit.WithError("Unable to get recent k8s versions: %v\n", err)
exit("Unable to get recent k8s versions: %v\n", err)
}
}
@ -65,16 +76,21 @@ func main() {
for _, cr := range containerRuntimes {
tf := download.TarballName(kv, cr)
if download.PreloadExists(kv, cr) {
fmt.Printf("A preloaded tarball for k8s version %s already exists, skipping generation.\n", kv)
fmt.Printf("A preloaded tarball for k8s version %s - runtime %q already exists, skipping generation.\n", kv, cr)
continue
}
fmt.Printf("A preloaded tarball for k8s version %s doesn't exist, generating now...\n", kv)
fmt.Printf("A preloaded tarball for k8s version %s - runtime %q doesn't exist, generating now...\n", kv, cr)
if err := generateTarball(kv, cr, tf); err != nil {
exit.WithError(fmt.Sprintf("generating tarball for k8s version %s with %s", kv, cr), err)
exit(fmt.Sprintf("generating tarball for k8s version %s with %s", kv, cr), err)
}
if err := uploadTarball(tf); err != nil {
exit.WithError(fmt.Sprintf("uploading tarball for k8s version %s with %s", kv, cr), err)
exit(fmt.Sprintf("uploading tarball for k8s version %s with %s", kv, cr), err)
}
if err := deleteMinikube(); err != nil {
fmt.Printf("error cleaning up minikube before finishing up: %v\n", err)
}
}
}
}
@ -93,3 +109,12 @@ func verifyDockerStorage() error {
}
return nil
}
// exit will exit and clean up minikube
func exit(msg string, err error) {
fmt.Printf("WithError(%s)=%v called from:\n%s", msg, err, debug.Stack())
if err := deleteMinikube(); err != nil {
fmt.Printf("error cleaning up minikube at start up: %v\n", err)
}
os.Exit(60)
}

View File

@ -129,4 +129,9 @@ var Addons = []*Addon{
set: SetBool,
callbacks: []setFn{enableOrDisableStorageClasses},
},
{
name: "metallb",
set: SetBool,
callbacks: []setFn{enableOrDisableAddon},
},
}

View File

@ -102,7 +102,7 @@ func (d *Driver) Create() error {
},
)
exists, err := oci.ContainerExists(d.OCIPrefix, d.OCIBinary, params.Name)
exists, err := oci.ContainerExists(d.OCIPrefix, d.OCIBinary, params.Name, true)
if err != nil {
glog.Warningf("failed to check if container already exists: %v", err)
}
@ -133,7 +133,7 @@ func (d *Driver) Create() error {
return
}
t := time.Now()
glog.Infof("Starting extracting preloaded images to volume")
glog.Infof("Starting extracting preloaded images to volume ...")
// Extract preloaded images to container
if err := oci.ExtractTarballToVolume(download.TarballPath(d.NodeConfig.KubernetesVersion, d.NodeConfig.ContainerRuntime), params.Name, BaseImage); err != nil {
glog.Infof("Unable to extract preloaded tarball to volume: %v", err)
@ -237,28 +237,7 @@ func (d *Driver) GetURL() (string, error) {
// GetState returns the state that the host is in (running, stopped, etc)
func (d *Driver) GetState() (state.State, error) {
out, err := oci.WarnIfSlow(d.NodeConfig.OCIPrefix, d.NodeConfig.OCIBinary, "inspect", "-f", "{{.State.Status}}", d.MachineName)
if err != nil {
return state.Error, err
}
o := strings.TrimSpace(string(out))
switch o {
case "configured":
return state.Stopped, nil
case "running":
return state.Running, nil
case "exited":
return state.Stopped, nil
case "paused":
return state.Paused, nil
case "restarting":
return state.Starting, nil
case "dead":
return state.Error, nil
default:
return state.None, fmt.Errorf("unknown state")
}
return oci.ContainerStatus(d.OCIPrefix, d.OCIBinary, d.MachineName, true)
}
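For reference, a standalone sketch of the status-string mapping that the removed switch carried inline and that oci.ContainerStatus now presumably centralizes; the helper name below is illustrative, not the real oci function signature:

package main

import (
	"fmt"

	"github.com/docker/machine/libmachine/state"
)

// containerStatus maps a docker/podman container status string to a libmachine state,
// mirroring the switch that used to live in GetState.
func containerStatus(s string) (state.State, error) {
	switch s {
	case "running":
		return state.Running, nil
	case "exited", "configured":
		return state.Stopped, nil
	case "paused":
		return state.Paused, nil
	case "restarting":
		return state.Starting, nil
	case "dead":
		return state.Error, nil
	default:
		return state.None, fmt.Errorf("unknown state %q", s)
	}
}

func main() {
	st, _ := containerStatus("paused")
	fmt.Println(st) // Paused
}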
// Kill stops a host forcefully, including any containers that we are managing.

View File

@ -0,0 +1,137 @@
/*
Copyright 2019 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oci
import (
"bytes"
"context"
"fmt"
"io"
"os/exec"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/minikube/pkg/minikube/out"
)
// RunResult holds the results of a Runner
type RunResult struct {
Stdout bytes.Buffer
Stderr bytes.Buffer
ExitCode int
Args []string // the args that was passed to Runner
}
// Command returns a human readable command string that does not induce eye fatigue
func (rr RunResult) Command() string {
var sb strings.Builder
sb.WriteString(rr.Args[0])
for _, a := range rr.Args[1:] {
if strings.Contains(a, " ") {
sb.WriteString(fmt.Sprintf(` "%s"`, a))
continue
}
sb.WriteString(fmt.Sprintf(" %s", a))
}
return sb.String()
}
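As a quick illustration of the quoting behaviour (hypothetical arguments, not taken from this change), inside this package:

rr := RunResult{Args: []string{"docker", "inspect", "-f", "{{.State.Status}}", "my node"}}
fmt.Println(rr.Command()) // docker inspect -f {{.State.Status}} "my node"

Only the argument containing a space is wrapped in quotes.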
// Output returns human-readable output for an execution result
func (rr RunResult) Output() string {
var sb strings.Builder
if rr.Stdout.Len() > 0 {
sb.WriteString(fmt.Sprintf("-- stdout --\n%s\n-- /stdout --", rr.Stdout.Bytes()))
}
if rr.Stderr.Len() > 0 {
sb.WriteString(fmt.Sprintf("\n** stderr ** \n%s\n** /stderr **", rr.Stderr.Bytes()))
}
return sb.String()
}
// runCmd runs an exec.Cmd against the docker daemon or podman
func runCmd(cmd *exec.Cmd, warnSlow ...bool) (*RunResult, error) {
warn := false
if len(warnSlow) > 0 {
warn = warnSlow[0]
}
killTime := 19 * time.Second // this will be applied only if warnSlow is true
warnTime := 2 * time.Second
ctx, cancel := context.WithTimeout(context.Background(), killTime)
defer cancel()
if cmd.Args[1] == "volume" || cmd.Args[1] == "ps" { // volume and ps requires more time than inspect
killTime = 30 * time.Second
warnTime = 3 * time.Second
}
if warn { // convert exec.Command to with context
cmdWithCtx := exec.CommandContext(ctx, cmd.Args[0], cmd.Args[1:]...)
cmdWithCtx.Stdout = cmd.Stdout //copying the original command
cmdWithCtx.Stderr = cmd.Stderr
cmd = cmdWithCtx
}
rr := &RunResult{Args: cmd.Args}
glog.Infof("Run: %v", rr.Command())
var outb, errb io.Writer
if cmd.Stdout == nil {
var so bytes.Buffer
outb = io.MultiWriter(&so, &rr.Stdout)
} else {
outb = io.MultiWriter(cmd.Stdout, &rr.Stdout)
}
if cmd.Stderr == nil {
var se bytes.Buffer
errb = io.MultiWriter(&se, &rr.Stderr)
} else {
errb = io.MultiWriter(cmd.Stderr, &rr.Stderr)
}
cmd.Stdout = outb
cmd.Stderr = errb
start := time.Now()
err := cmd.Run()
elapsed := time.Since(start)
if warn {
if elapsed > warnTime {
out.WarningT(`Executing "{{.command}}" took an unusually long time: {{.duration}}`, out.V{"command": rr.Command(), "duration": elapsed})
out.ErrT(out.Tip, `Restarting the {{.name}} service may improve performance.`, out.V{"name": cmd.Args[0]})
}
if ctx.Err() == context.DeadlineExceeded {
return rr, fmt.Errorf("%q timed out after %s", rr.Command(), killTime)
}
}
if exitError, ok := err.(*exec.ExitError); ok {
rr.ExitCode = exitError.ExitCode()
}
// Decrease log spam
if elapsed > (1 * time.Second) {
glog.Infof("Completed: %s: (%s)", rr.Command(), elapsed)
}
if err == nil {
return rr, nil
}
return rr, fmt.Errorf("%s: %v\nstdout:\n%s\nstderr:\n%s", rr.Command(), err, rr.Stdout.String(), rr.Stderr.String())
}
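A minimal sketch of a typical caller, assuming this file's existing imports (os/exec, strings) and the package-level Docker constant; the command and the warnSlow flag mirror how ContainerExists (changed later in this diff) calls runCmd:

// listNames runs `docker ps -a --format {{.Names}}` through runCmd with the
// slow-command warning enabled and returns the captured stdout as a list of names.
func listNames() ([]string, error) {
	rr, err := runCmd(exec.Command(Docker, "ps", "-a", "--format", "{{.Names}}"), true)
	if err != nil {
		return nil, err
	}
	return strings.Split(strings.TrimSpace(rr.Stdout.String()), "\n"), nil
}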

View File

@ -216,15 +216,12 @@ type podmanSysInfo struct {
// dockerSystemInfo returns docker system info --format '{{json .}}'
func dockerSystemInfo() (dockerSysInfo, error) {
var ds dockerSysInfo
cmd := exec.Command(Docker, "system", "info", "--format", "{{json .}}")
out, err := cmd.CombinedOutput()
rr, err := runCmd(exec.Command(Docker, "system", "info", "--format", "{{json .}}"))
if err != nil {
return ds, errors.Wrap(err, "get docker system info")
}
if err := json.Unmarshal([]byte(strings.TrimSpace(string(out))), &ds); err != nil {
if err := json.Unmarshal([]byte(strings.TrimSpace(rr.Stdout.String())), &ds); err != nil {
return ds, errors.Wrapf(err, "unmarshal docker system info")
}
@ -234,12 +231,12 @@ func dockerSystemInfo() (dockerSysInfo, error) {
// podmanSysInfo returns podman system info --format '{{json .}}'
func podmanSystemInfo() (podmanSysInfo, error) {
var ps podmanSysInfo
cmd := exec.Command("sudo", Podman, "system", "info", "--format", "{{json .}}")
out, err := cmd.CombinedOutput()
rr, err := runCmd(exec.Command("sudo", Podman, "system", "info", "--format", "'{{json .}}'"))
if err != nil {
return ps, errors.Wrap(err, "get podman system info")
}
if err := json.Unmarshal([]byte(strings.TrimSpace(string(out))), &ps); err != nil {
if err := json.Unmarshal([]byte(strings.TrimSpace(rr.Stdout.String())), &ps); err != nil {
return ps, errors.Wrapf(err, "unmarshal podman system info")
}
return ps, nil

View File

@ -43,12 +43,10 @@ func RoutableHostIPFromInside(ociBin string, containerName string) (net.IP, erro
// digDNS will get the IP record for a dns
func digDNS(ociBin, containerName, dns string) (net.IP, error) {
cmd := exec.Command(ociBin, "exec", "-t", containerName, "dig", "+short", dns)
out, err := cmd.CombinedOutput()
ip := net.ParseIP(strings.TrimSpace(string(out)))
rr, err := runCmd(exec.Command(ociBin, "exec", "-t", containerName, "dig", "+short", dns))
ip := net.ParseIP(strings.TrimSpace(rr.Stdout.String()))
if err != nil {
return ip, errors.Wrapf(err, "resolve dns to ip: %s", string(out))
return ip, errors.Wrapf(err, "resolve dns to ip")
}
glog.Infof("got host ip for mount in container by digging dns: %s", ip.String())
@ -58,23 +56,19 @@ func digDNS(ociBin, containerName, dns string) (net.IP, error) {
// dockerGatewayIP gets the default gateway ip for the docker bridge on the user's host machine
// gets the ip from user's host docker
func dockerGatewayIP() (net.IP, error) {
cmd := exec.Command(Docker, "network", "ls", "--filter", "name=bridge", "--format", "{{.ID}}")
out, err := cmd.CombinedOutput()
rr, err := runCmd(exec.Command(Docker, "network", "ls", "--filter", "name=bridge", "--format", "{{.ID}}"))
if err != nil {
return nil, errors.Wrapf(err, "get network bridge. output: %s", string(out))
return nil, errors.Wrapf(err, "get network bridge")
}
bridgeID := strings.TrimSpace(string(out))
cmd = exec.Command("env", Docker, "inspect",
"--format", "{{(index .IPAM.Config 0).Gateway}}", bridgeID)
out, err = cmd.CombinedOutput()
bridgeID := strings.TrimSpace(rr.Stdout.String())
rr, err = runCmd(exec.Command("env", Docker, "inspect",
"--format", "{{(index .IPAM.Config 0).Gateway}}", bridgeID))
if err != nil {
return nil, errors.Wrapf(err, "inspect IP gatway for bridge network: %q. output: %s", string(out), bridgeID)
return nil, errors.Wrapf(err, "inspect IP bridge network %q.", bridgeID)
}
ip := net.ParseIP(strings.TrimSpace(string(out)))
ip := net.ParseIP(strings.TrimSpace(rr.Stdout.String()))
glog.Infof("got host ip for mount in container by inspect docker network: %s", ip.String())
return ip, nil
}
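For context, a comment-style sketch of the CLI equivalents of the two runCmd calls above (the bridge ID and gateway address are made up):

// Illustrative CLI equivalents of the two calls in dockerGatewayIP:
//   docker network ls --filter name=bridge --format '{{.ID}}'                      -> e.g. "a1b2c3d4e5f6"
//   env docker inspect --format '{{(index .IPAM.Config 0).Gateway}}' a1b2c3d4e5f6  -> e.g. "172.17.0.1"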
@ -84,26 +78,24 @@ func dockerGatewayIP() (net.IP, error) {
// will return the docker assigned port:
// 32769, nil
// only supports TCP ports
func ForwardedPort(ociBinary string, ociID string, contPort int) (int, error) {
var out []byte
func ForwardedPort(ociBin string, ociID string, contPort int) (int, error) {
var rr *RunResult
var err error
if ociBinary == Podman {
if ociBin == Podman {
//podman inspect -f "{{range .NetworkSettings.Ports}}{{if eq .ContainerPort "80"}}{{.HostPort}}{{end}}{{end}}"
cmd := exec.Command("sudo", ociBinary, "inspect", "-f", fmt.Sprintf("{{range .NetworkSettings.Ports}}{{if eq .ContainerPort %s}}{{.HostPort}}{{end}}{{end}}", fmt.Sprint(contPort)), ociID)
out, err = cmd.CombinedOutput()
rr, err = runCmd(exec.Command("sudo", ociBin, "inspect", "-f", fmt.Sprintf("{{range .NetworkSettings.Ports}}{{if eq .ContainerPort %s}}{{.HostPort}}{{end}}{{end}}", fmt.Sprint(contPort)), ociID))
if err != nil {
return 0, errors.Wrapf(err, "get host-bind port %d for %q, output %s", contPort, ociID, out)
return 0, errors.Wrapf(err, "get port %d for %q", contPort, ociID)
}
} else {
cmd := exec.Command("env", ociBinary, "inspect", "-f", fmt.Sprintf("'{{(index (index .NetworkSettings.Ports \"%d/tcp\") 0).HostPort}}'", contPort), ociID)
out, err = cmd.CombinedOutput()
rr, err = runCmd(exec.Command("env", ociBin, "inspect", "-f", fmt.Sprintf("'{{(index (index .NetworkSettings.Ports \"%d/tcp\") 0).HostPort}}'", contPort), ociID))
if err != nil {
return 0, errors.Wrapf(err, "get host-bind port %d for %q, output %s", contPort, ociID, out)
return 0, errors.Wrapf(err, "get port %d for %q", contPort, ociID)
}
}
o := strings.TrimSpace(string(out))
o := strings.TrimSpace(rr.Stdout.String())
o = strings.Trim(o, "'")
p, err := strconv.Atoi(o)
@ -115,8 +107,8 @@ func ForwardedPort(ociBinary string, ociID string, contPort int) (int, error) {
}
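A sketch of a call site inside this package, with a hypothetical container name; the doc comment above already gives 32769 as an example result:

hostPort, err := ForwardedPort(Docker, "minikube", 22) // e.g. 32769, nil
if err != nil {
	glog.Warningf("reading forwarded port: %v", err)
}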
// ContainerIPs returns ipv4,ipv6, error of a container by their name
func ContainerIPs(ociBinary string, name string) (string, string, error) {
if ociBinary == Podman {
func ContainerIPs(ociBin string, name string) (string, string, error) {
if ociBin == Podman {
return podmanConttainerIP(name)
}
return dockerContainerIP(name)
@ -124,14 +116,13 @@ func ContainerIPs(ociBinary string, name string) (string, string, error) {
// podmanConttainerIP returns ipv4, ipv6 of container or error
func podmanConttainerIP(name string) (string, string, error) {
cmd := exec.Command("sudo", Podman, "inspect",
rr, err := runCmd(exec.Command("sudo", Podman, "inspect",
"-f", "{{.NetworkSettings.IPAddress}}",
name)
out, err := cmd.CombinedOutput()
name))
if err != nil {
return "", "", errors.Wrapf(err, "podman inspect ip %s", name)
}
output := strings.TrimSpace(string(out))
output := strings.TrimSpace(rr.Stdout.String())
if err == nil && output == "" { // podman returns empty for 127.0.0.1
return DefaultBindIPV4, "", nil
}

View File

@ -17,7 +17,6 @@ limitations under the License.
package oci
import (
"context"
"os"
"time"
@ -28,7 +27,6 @@ import (
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/util/retry"
"fmt"
@ -42,7 +40,7 @@ import (
func DeleteContainersByLabel(prefix string, ociBin string, label string) []error {
var deleteErrs []error
cs, err := listContainersByLabel(prefix, ociBin, label)
cs, err := ListContainersByLabel(prefix, ociBin, label)
if err != nil {
return []error{fmt.Errorf("listing containers by label %q", label)}
}
@ -63,9 +61,9 @@ func DeleteContainersByLabel(prefix string, ociBin string, label string) []error
if err := ShutDown(prefix, ociBin, c); err != nil {
glog.Infof("couldn't shut down %s (might be okay): %v ", c, err)
}
cmd := exec.Command(prefix, ociBin, "rm", "-f", "-v", c)
if out, err := cmd.CombinedOutput(); err != nil {
deleteErrs = append(deleteErrs, errors.Wrapf(err, "delete container %s: output %s", c, out))
if _, err := runCmd(exec.Command(prefix, ociBin, "rm", "-f", "-v", c)); err != nil {
deleteErrs = append(deleteErrs, errors.Wrapf(err, "delete container %s: output %s", c, err))
}
}
@ -83,9 +81,9 @@ func DeleteContainer(prefix string, ociBin string, name string) error {
if err := ShutDown(prefix, ociBin, name); err != nil {
glog.Infof("couldn't shut down %s (might be okay): %v ", name, err)
}
cmd := exec.Command(prefix, ociBin, "rm", "-f", "-v", name)
if out, err := cmd.CombinedOutput(); err != nil {
return errors.Wrapf(err, "delete container %s: output %s", name, out)
if _, err := runCmd(exec.Command(prefix, ociBin, "rm", "-f", "-v", name)); err != nil {
return errors.Wrapf(err, "delete %s", name)
}
return nil
}
@ -213,9 +211,8 @@ func createContainer(prefix string, ociBin string, image string, opts ...createO
args = append(args, image)
args = append(args, o.ContainerArgs...)
out, err := exec.Command(prefix, args...).CombinedOutput()
if err != nil {
return errors.Wrapf(err, "failed args: %v output: %s", args, out)
if _, err := runCmd(exec.Command(prefix, args...)); err != nil {
return err
}
return nil
@ -228,72 +225,33 @@ func Copy(prefix string, ociBin string, ociID string, targetDir string, fName st
}
destination := fmt.Sprintf("%s:%s", ociID, targetDir)
cmd := exec.Command(prefix, ociBin, "cp", fName, destination)
if err := cmd.Run(); err != nil {
return errors.Wrapf(err, "error copying %s into node", fName)
if _, err := runCmd(exec.Command(prefix, ociBin, "cp", fName, destination)); err != nil {
return err
}
return nil
}
// ContainerID returns id of a container name
func ContainerID(prefix string, ociBin string, nameOrID string) (string, error) {
cmd := exec.Command(prefix, ociBin, "inspect", "-f", "{{.Id}}", nameOrID)
out, err := cmd.CombinedOutput()
func ContainerID(prefix, ociBin string, nameOrID string) (string, error) {
rr, err := runCmd(exec.Command(prefix, ociBin, "inspect", "-f", "{{.Id}}", nameOrID))
if err != nil { // don't return error if not found, only return empty string
if strings.Contains(string(out), "Error: No such object:") || strings.Contains(string(out), "unable to find") {
if strings.Contains(rr.Stdout.String(), "Error: No such object:") || strings.Contains(rr.Stdout.String(), "unable to find") {
err = nil
}
out = []byte{}
return "", err
}
return string(out), err
}
// WarnIfSlow runs an oci command, warning about performance issues
func WarnIfSlow(args ...string) ([]byte, error) {
killTime := 19 * time.Second
warnTime := 2 * time.Second
if args[1] == "volume" || args[1] == "ps" { // volume and ps requires more time than inspect
killTime = 30 * time.Second
warnTime = 3 * time.Second
}
ctx, cancel := context.WithTimeout(context.Background(), killTime)
defer cancel()
start := time.Now()
glog.Infof("executing with %s timeout: %v", args, killTime)
cmd := exec.CommandContext(ctx, args[0], args[1:]...)
stdout, err := cmd.Output()
d := time.Since(start)
if d > warnTime {
out.WarningT(`Executing "{{.command}}" took an unusually long time: {{.duration}}`, out.V{"command": strings.Join(cmd.Args, " "), "duration": d})
out.ErrT(out.Tip, `Restarting the {{.name}} service may improve performance.`, out.V{"name": args[0]})
}
if ctx.Err() == context.DeadlineExceeded {
return stdout, fmt.Errorf("%q timed out after %s", strings.Join(cmd.Args, " "), killTime)
}
if err != nil {
if exitErr, ok := err.(*exec.ExitError); ok {
return stdout, fmt.Errorf("%q failed: %v: %s", strings.Join(cmd.Args, " "), exitErr, exitErr.Stderr)
}
return stdout, fmt.Errorf("%q failed: %v", strings.Join(cmd.Args, " "), err)
}
return stdout, nil
return rr.Stdout.String(), nil
}
// ContainerExists checks if container name exists (either running or exited)
func ContainerExists(prefix string, ociBin string, name string) (bool, error) {
out, err := WarnIfSlow(prefix, ociBin, "ps", "-a", "--format", "{{.Names}}")
func ContainerExists(prefix string, ociBin string, name string, warnSlow ...bool) (bool, error) {
rr, err := runCmd(exec.Command(ociBin, "ps", "-a", "--format", "{{.Names}}"), warnSlow...)
if err != nil {
return false, errors.Wrapf(err, string(out))
return false, err
}
containers := strings.Split(string(out), "\n")
containers := strings.Split(rr.Stdout.String(), "\n")
for _, c := range containers {
if strings.TrimSpace(c) == name {
return true, nil
@ -305,36 +263,32 @@ func ContainerExists(prefix string, ociBin string, name string) (bool, error) {
// IsCreatedByMinikube returns true if the container was created by minikube
// with default assumption that it is not created by minikube when we don't know for sure
func IsCreatedByMinikube(prefix string, ociBin string, nameOrID string) bool {
cmd := exec.Command(prefix, ociBin, "inspect", nameOrID, "--format", "{{.Config.Labels}}")
out, err := cmd.CombinedOutput()
func IsCreatedByMinikube(prefix, ociBin string, nameOrID string) bool {
rr, err := runCmd(exec.Command(prefix, ociBin, "inspect", nameOrID, "--format", "{{.Config.Labels}}"))
if err != nil {
return false
}
if strings.Contains(string(out), fmt.Sprintf("%s:true", CreatedByLabelKey)) {
if strings.Contains(rr.Stdout.String(), fmt.Sprintf("%s:true", CreatedByLabelKey)) {
return true
}
return false
}
// ListOwnedContainers lists all the containers that the kic driver created on the user's machine, using a label
func ListOwnedContainers(prefix string, ociBin string) ([]string, error) {
return listContainersByLabel(prefix, ociBin, ProfileLabelKey)
return ListContainersByLabel(prefix, ociBin, ProfileLabelKey)
}
// inspect return low-level information on containers
func inspect(prefix string, ociBin string, containerNameOrID, format string) ([]string, error) {
cmd := exec.Command(prefix, ociBin, "inspect",
"-f", format,
containerNameOrID) // ... against the "node" container
var buff bytes.Buffer
cmd.Stdout = &buff
cmd.Stderr = &buff
err := cmd.Run()
_, err := runCmd(cmd)
scanner := bufio.NewScanner(&buff)
var lines []string
for scanner.Scan() {
@ -395,8 +349,8 @@ func isUsernsRemapEnabled(prefix string, ociBin string) bool {
var buff bytes.Buffer
cmd.Stdout = &buff
cmd.Stderr = &buff
err := cmd.Run()
if err != nil {
if _, err := runCmd(cmd); err != nil {
return false
}
@ -452,13 +406,13 @@ func withPortMappings(portMappings []PortMapping) createOpt {
}
}
// listContainersByLabel returns all the container names with a specified label
func listContainersByLabel(prefix string, ociBin string, label string) ([]string, error) {
stdout, err := WarnIfSlow(prefix, ociBin, "ps", "-a", "--filter", fmt.Sprintf("label=%s", label), "--format", "{{.Names}}")
// ListContainersByLabel returns all the container names with a specified label
func ListContainersByLabel(prefix string, ociBin string, label string, warnSlow ...bool) ([]string, error) {
rr, err := runCmd(exec.Command(prefix, ociBin, "ps", "-a", "--filter", fmt.Sprintf("label=%s", label), "--format", "{{.Names}}"), warnSlow...)
if err != nil {
return nil, err
}
s := bufio.NewScanner(bytes.NewReader(stdout))
s := bufio.NewScanner(bytes.NewReader(rr.Stdout.Bytes()))
var names []string
for s.Scan() {
n := strings.TrimSpace(s.Text())
@ -489,18 +443,19 @@ func PointToHostDockerDaemon() error {
}
// ContainerRunning returns running state of a container
func ContainerRunning(prefix string, ociBin string, name string) (bool, error) {
out, err := WarnIfSlow(prefix, ociBin, "inspect", name, "--format={{.State.Running}}")
func ContainerRunning(prefix string, ociBin string, name string, warnSlow ...bool) (bool, error) {
rr, err := runCmd(exec.Command(prefix, ociBin, "inspect", name, "--format={{.State.Running}}"), warnSlow...)
if err != nil {
return false, err
}
return strconv.ParseBool(strings.TrimSpace(string(out)))
return strconv.ParseBool(strings.TrimSpace(rr.Stdout.String()))
}
// ContainerStatus returns status of a container running,exited,...
func ContainerStatus(prefix string, ociBin string, name string) (state.State, error) {
out, err := WarnIfSlow(prefix, ociBin, "inspect", name, "--format={{.State.Status}}")
o := strings.TrimSpace(string(out))
func ContainerStatus(prefix string, ociBin string, name string, warnSlow ...bool) (state.State, error) {
cmd := exec.Command(prefix, ociBin, "inspect", name, "--format={{.State.Status}}")
rr, err := runCmd(cmd, warnSlow...)
o := strings.TrimSpace(rr.Stdout.String())
switch o {
case "running":
return state.Running, nil
@ -517,13 +472,12 @@ func ContainerStatus(prefix string, ociBin string, name string) (state.State, er
}
}
// Shutdown will run command to shut down the container
// ShutDown will run command to shut down the container
// to ensure the containers process and networking bindings are all closed
// to avoid containers getting stuck before delete https://github.com/kubernetes/minikube/issues/7657
func ShutDown(prefix string, ociBin string, name string) error {
cmd := exec.Command(prefix, ociBin, "exec", "--privileged", "-t", name, "/bin/bash", "-c", "sudo init 0")
if out, err := cmd.CombinedOutput(); err != nil {
glog.Infof("error shutdown %s output %q : %v", name, out, err)
if _, err := runCmd(exec.Command(prefix, ociBin, "exec", "--privileged", "-t", name, "/bin/bash", "-c", "sudo init 0")); err != nil {
glog.Infof("error shutdown %s: %v", name, err)
}
// helps with allowing docker realize the container is exited and report its status correctly.
time.Sleep(time.Second * 1)

View File

@ -29,7 +29,7 @@ import (
// DeleteAllVolumesByLabel deletes all volumes that have a specific label
// if there is no volume to delete it will return nil
func DeleteAllVolumesByLabel(prefix string, ociBin string, label string) []error {
func DeleteAllVolumesByLabel(prefix string, ociBin string, label string, warnSlow ...bool) []error {
var deleteErrs []error
glog.Infof("trying to delete all %s volumes with label %s", ociBin, label)
@ -40,7 +40,7 @@ func DeleteAllVolumesByLabel(prefix string, ociBin string, label string) []error
}
for _, v := range vs {
if _, err := WarnIfSlow(prefix, ociBin, "volume", "rm", "--force", v); err != nil {
if _, err := runCmd(exec.Command(prefix, ociBin, "volume", "rm", "--force", v), warnSlow...); err != nil {
deleteErrs = append(deleteErrs, fmt.Errorf("deleting %q", v))
}
}
@ -51,11 +51,11 @@ func DeleteAllVolumesByLabel(prefix string, ociBin string, label string) []error
// PruneAllVolumesByLabel deletes all volumes that have a specific label
// if there is no volume to delete it will return nil
// example: docker volume prune -f --filter label=name.minikube.sigs.k8s.io=minikube
func PruneAllVolumesByLabel(prefix string, ociBin string, label string) []error {
func PruneAllVolumesByLabel(prefix string, ociBin string, label string, warnSlow ...bool) []error {
var deleteErrs []error
glog.Infof("trying to prune all %s volumes with label %s", ociBin, label)
if _, err := WarnIfSlow(prefix, ociBin, "volume", "prune", "-f", "--filter", "label="+label); err != nil {
cmd := exec.Command(prefix, ociBin, "volume", "prune", "-f", "--filter", "label="+label)
if _, err := runCmd(cmd, warnSlow...); err != nil {
deleteErrs = append(deleteErrs, errors.Wrapf(err, "prune volume by label %s", label))
}
@ -65,9 +65,8 @@ func PruneAllVolumesByLabel(prefix string, ociBin string, label string) []error
// allVolumesByLabel returns name of all docker volumes by a specific label
// will not return error if there is no volume found.
func allVolumesByLabel(prefix string, ociBin string, label string) ([]string, error) {
cmd := exec.Command(prefix, ociBin, "volume", "ls", "--filter", "label="+label, "--format", "{{.Name}}")
stdout, err := cmd.Output()
s := bufio.NewScanner(bytes.NewReader(stdout))
rr, err := runCmd(exec.Command(prefix, ociBin, "volume", "ls", "--filter", "label="+label, "--format", "{{.Name}}"))
s := bufio.NewScanner(bytes.NewReader(rr.Stdout.Bytes()))
var vols []string
for s.Scan() {
v := strings.TrimSpace(s.Text())
@ -82,9 +81,8 @@ func allVolumesByLabel(prefix string, ociBin string, label string) ([]string, er
// to the volume named volumeName
func ExtractTarballToVolume(tarballPath, volumeName, imageName string) error {
cmd := exec.Command(Docker, "run", "--rm", "--entrypoint", "/usr/bin/tar", "-v", fmt.Sprintf("%s:/preloaded.tar:ro", tarballPath), "-v", fmt.Sprintf("%s:/extractDir", volumeName), imageName, "-I", "lz4", "-xvf", "/preloaded.tar", "-C", "/extractDir")
glog.Infof("executing: %s", cmd.Args)
if out, err := cmd.CombinedOutput(); err != nil {
return errors.Wrapf(err, "output %s", string(out))
if _, err := runCmd(cmd); err != nil {
return err
}
return nil
}
@ -93,10 +91,8 @@ func ExtractTarballToVolume(tarballPath, volumeName, imageName string) error {
// Caution! If the volume already exists, this does NOT return an error and will not apply the minikube labels to it.
// TODO: this should be fixed as a part of https://github.com/kubernetes/minikube/issues/6530
func createDockerVolume(profile string, nodeName string) error {
cmd := exec.Command(Docker, "volume", "create", nodeName, "--label", fmt.Sprintf("%s=%s", ProfileLabelKey, profile), "--label", fmt.Sprintf("%s=%s", CreatedByLabelKey, "true"))
glog.Infof("executing: %s", cmd.Args)
if out, err := cmd.CombinedOutput(); err != nil {
return errors.Wrapf(err, "output %s", string(out))
if _, err := runCmd(exec.Command(Docker, "volume", "create", nodeName, "--label", fmt.Sprintf("%s=%s", ProfileLabelKey, profile), "--label", fmt.Sprintf("%s=%s", CreatedByLabelKey, "true"))); err != nil {
return err
}
return nil
}

View File

@ -355,6 +355,20 @@ var Addons = map[string]*Addon{
"0640",
false),
}, false, "ingress-dns"),
"metallb": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/metallb/metallb.yaml",
vmpath.GuestAddonsDir,
"metallb.yaml",
"0640",
false),
MustBinAsset(
"deploy/addons/metallb/metallb-config.yaml.tmpl",
vmpath.GuestAddonsDir,
"metallb-config.yaml",
"0640",
true),
}, false, "metallb"),
}
// GenerateTemplateData generates template data for template assets
@ -368,13 +382,17 @@ func GenerateTemplateData(cfg config.KubernetesConfig) interface{} {
ea = "-" + runtime.GOARCH
}
opts := struct {
Arch string
ExoticArch string
ImageRepository string
Arch string
ExoticArch string
ImageRepository string
LoadBalancerStartIP string
LoadBalancerEndIP string
}{
Arch: a,
ExoticArch: ea,
ImageRepository: cfg.ImageRepository,
Arch: a,
ExoticArch: ea,
ImageRepository: cfg.ImageRepository,
LoadBalancerStartIP: cfg.LoadBalancerStartIP,
LoadBalancerEndIP: cfg.LoadBalancerEndIP,
}
return opts
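A minimal, self-contained sketch of how these two new fields could flow into an addon template via text/template; the template text and the IP range below are illustrative only, not the actual deploy/addons/metallb/metallb-config.yaml.tmpl:

package main

import (
	"os"
	"text/template"
)

func main() {
	// Illustrative template only; the real metallb config template ships with the
	// addon assets and is rendered with the data built by GenerateTemplateData.
	tmpl := template.Must(template.New("metallb-config").Parse(
		"addresses:\n  - {{.LoadBalancerStartIP}}-{{.LoadBalancerEndIP}}\n"))
	data := struct {
		LoadBalancerStartIP string
		LoadBalancerEndIP   string
	}{"192.168.49.100", "192.168.49.120"}
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}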

View File

@ -50,6 +50,8 @@ var componentToKubeadmConfigKey = map[string]string{
ControllerManager: "controllerManager",
Scheduler: "scheduler",
Kubeadm: "kubeadm",
// Kube-proxy is handled in a different config block
Kubeproxy: "",
// The Kubelet is not configured in kubeadm, only in systemd.
Kubelet: "",
}
@ -178,6 +180,9 @@ func optionPairsForComponent(component string, version semver.Version, cp config
return nil
}
// kubeadm extra args should not be included in the kubeadm config in the extra args section (instead, they must
// be inserted explicitly in the appropriate places or supplied from the command line); here we remove all of the
// kubeadm extra args from the slice
// createExtraComponentConfig generates a map of component to extra args for all of the components except kubeadm
func createExtraComponentConfig(extraOptions config.ExtraOptionSlice, version semver.Version, componentFeatureArgs string, cp config.Node) ([]componentOptions, error) {
extraArgsSlice, err := newComponentOptions(extraOptions, version, componentFeatureArgs, cp)
@ -185,9 +190,6 @@ func createExtraComponentConfig(extraOptions config.ExtraOptionSlice, version se
return nil, err
}
// kubeadm extra args should not be included in the kubeadm config in the extra args section (instead, they must
// be inserted explicitly in the appropriate places or supplied from the command line); here we remove all of the
// kubeadm extra args from the slice
for i, extraArgs := range extraArgsSlice {
if extraArgs.Component == Kubeadm {
extraArgsSlice = append(extraArgsSlice[:i], extraArgsSlice[i+1:]...)
@ -197,6 +199,12 @@ func createExtraComponentConfig(extraOptions config.ExtraOptionSlice, version se
return extraArgsSlice, nil
}
// createKubeProxyOptions generates a map of extra config for kube-proxy
func createKubeProxyOptions(extraOptions config.ExtraOptionSlice) map[string]string {
kubeProxyOptions := extraOptions.AsMap().Get(Kubeproxy)
return kubeProxyOptions
}
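A sketch of the data flow, using the same ExtraOption shape as the test fixture later in this diff; the resulting map is what printMapInOrder renders into the KubeProxyConfiguration block:

// One extra option targeted at the kube-proxy component ...
opts := config.ExtraOptionSlice{
	{Component: Kubeproxy, Key: "mode", Value: "iptables"},
}
// ... becomes a one-entry map, rendered later as `mode: "iptables"`.
kubeProxyOpts := createKubeProxyOptions(opts) // map[string]string{"mode": "iptables"}
_ = kubeProxyOpts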
func convertToFlags(opts map[string]string) string {
var flags []string
var keys []string

View File

@ -28,7 +28,7 @@ var KubeadmYamlPath = path.Join(vmpath.GuestEphemeralDir, "kubeadm.yaml")
const (
//DefaultCNIConfigPath is the configuration file for CNI networks
DefaultCNIConfigPath = "/etc/cni/net.d/k8s.conf"
DefaultCNIConfigPath = "/etc/cni/net.d/1-k8s.conf"
// KubeletServiceFile is the file for the systemd kubelet.service
KubeletServiceFile = "/lib/systemd/system/kubelet.service"
// KubeletSystemdConfFile is config for the systemd kubelet.service

View File

@ -82,5 +82,9 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "{{.PodSubnet }}"
metricsBindAddress: {{.AdvertiseAddress}}:10249
{{- range $i, $val := printMapInOrder .KubeProxyOptions ": " }}
{{$val}}
{{- end}}
`))

View File

@ -80,5 +80,9 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "{{.PodSubnet }}"
metricsBindAddress: {{.AdvertiseAddress}}:10249
{{- range $i, $val := printMapInOrder .KubeProxyOptions ": " }}
{{$val}}
{{- end}}
`))

View File

@ -83,6 +83,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana
NoTaintMaster bool
NodeIP string
ControlPlaneAddress string
KubeProxyOptions map[string]string
}{
CertDir: vmpath.GuestKubernetesCertsDir,
ServiceCIDR: constants.DefaultServiceCIDR,
@ -102,6 +103,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana
DNSDomain: k8s.DNSDomain,
NodeIP: n.IP,
ControlPlaneAddress: cp.IP,
KubeProxyOptions: createKubeProxyOptions(k8s.ExtraOptions),
}
if k8s.ServiceCIDR != "" {
@ -135,6 +137,7 @@ const (
Apiserver = "apiserver"
Scheduler = "scheduler"
ControllerManager = "controller-manager"
Kubeproxy = "kube-proxy"
)
// InvokeKubeadm returns the invocation command for Kubeadm

View File

@ -55,6 +55,11 @@ func getExtraOpts() []config.ExtraOption {
Key: "dry-run",
Value: "true",
},
config.ExtraOption{
Component: Kubeproxy,
Key: "mode",
Value: "iptables",
},
}
}

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -60,4 +60,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -51,4 +51,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -57,4 +57,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -60,4 +60,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -51,4 +51,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -57,4 +57,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -60,4 +60,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -51,4 +51,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -57,4 +57,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -58,4 +58,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -49,4 +49,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -55,4 +55,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -58,4 +58,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -49,4 +49,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -55,4 +55,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -58,4 +58,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -49,4 +49,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -55,4 +55,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -135,7 +135,7 @@ func dashboardFrontend(repo string) string {
repo = "kubernetesui"
}
// See 'kubernetes-dashboard' in deploy/addons/dashboard/dashboard-dp.yaml
return path.Join(repo, "dashboard:v2.0.0-rc6")
return path.Join(repo, "dashboard:v2.0.0")
}
// dashboardMetrics returns the image used for the dashboard metrics scraper

View File

@ -25,7 +25,7 @@ import (
func TestAuxiliary(t *testing.T) {
want := []string{
"gcr.io/k8s-minikube/storage-provisioner:v1.8.1",
"kubernetesui/dashboard:v2.0.0-rc6",
"kubernetesui/dashboard:v2.0.0",
"kubernetesui/metrics-scraper:v1.0.2",
}
got := auxiliary("")
@ -37,7 +37,7 @@ func TestAuxiliary(t *testing.T) {
func TestAuxiliaryMirror(t *testing.T) {
want := []string{
"test.mirror/storage-provisioner:v1.8.1",
"test.mirror/dashboard:v2.0.0-rc6",
"test.mirror/dashboard:v2.0.0",
"test.mirror/metrics-scraper:v1.0.2",
}
got := auxiliary("test.mirror")

View File

@ -38,7 +38,7 @@ func TestKubeadmImages(t *testing.T) {
"k8s.gcr.io/etcd:3.4.3-0",
"k8s.gcr.io/pause:3.1",
"gcr.io/k8s-minikube/storage-provisioner:v1.8.1",
"kubernetesui/dashboard:v2.0.0-rc6",
"kubernetesui/dashboard:v2.0.0",
"kubernetesui/metrics-scraper:v1.0.2",
}},
{"v1.16.1", "mirror.k8s.io", []string{
@ -50,7 +50,7 @@ func TestKubeadmImages(t *testing.T) {
"mirror.k8s.io/etcd:3.3.15-0",
"mirror.k8s.io/pause:3.1",
"mirror.k8s.io/storage-provisioner:v1.8.1",
"mirror.k8s.io/dashboard:v2.0.0-rc6",
"mirror.k8s.io/dashboard:v2.0.0",
"mirror.k8s.io/metrics-scraper:v1.0.2",
}},
{"v1.15.0", "", []string{
@ -62,7 +62,7 @@ func TestKubeadmImages(t *testing.T) {
"k8s.gcr.io/etcd:3.3.10",
"k8s.gcr.io/pause:3.1",
"gcr.io/k8s-minikube/storage-provisioner:v1.8.1",
"kubernetesui/dashboard:v2.0.0-rc6",
"kubernetesui/dashboard:v2.0.0",
"kubernetesui/metrics-scraper:v1.0.2",
}},
{"v1.14.0", "", []string{
@ -74,7 +74,7 @@ func TestKubeadmImages(t *testing.T) {
"k8s.gcr.io/etcd:3.3.10",
"k8s.gcr.io/pause:3.1",
"gcr.io/k8s-minikube/storage-provisioner:v1.8.1",
"kubernetesui/dashboard:v2.0.0-rc6",
"kubernetesui/dashboard:v2.0.0",
"kubernetesui/metrics-scraper:v1.0.2",
}},
{"v1.13.0", "", []string{
@ -86,7 +86,7 @@ func TestKubeadmImages(t *testing.T) {
"k8s.gcr.io/etcd:3.2.24",
"k8s.gcr.io/pause:3.1",
"gcr.io/k8s-minikube/storage-provisioner:v1.8.1",
"kubernetesui/dashboard:v2.0.0-rc6",
"kubernetesui/dashboard:v2.0.0",
"kubernetesui/metrics-scraper:v1.0.2",
}},
{"v1.12.0", "", []string{
@ -98,7 +98,7 @@ func TestKubeadmImages(t *testing.T) {
"k8s.gcr.io/etcd:3.2.24",
"k8s.gcr.io/pause:3.1",
"gcr.io/k8s-minikube/storage-provisioner:v1.8.1",
"kubernetesui/dashboard:v2.0.0-rc6",
"kubernetesui/dashboard:v2.0.0",
"kubernetesui/metrics-scraper:v1.0.2",
}},
}

View File

@ -177,14 +177,27 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error {
"FileAvailable--etc-kubernetes-manifests-etcd.yaml",
"Port-10250", // For "none" users who already have a kubelet online
"Swap", // For "none" users who have swap configured
"SystemVerification",
}
ignore = append(ignore, bsutil.SkipAdditionalPreflights[r.Name()]...)
skipSystemVerification := false
// Allow older kubeadm versions to function with newer Docker releases.
if version.LT(semver.MustParse("1.13.0")) {
glog.Infof("ignoring SystemVerification for kubeadm because of old kubernetes version %v", version)
skipSystemVerification = true
}
if driver.BareMetal(cfg.Driver) && r.Name() == "Docker" {
if v, err := r.Version(); err == nil && strings.Contains(v, "azure") {
glog.Infof("ignoring SystemVerification for kubeadm because of unknown docker version %s", v)
skipSystemVerification = true
}
}
// For kic on linux example error: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.2.17-1rodete3-amd64"
if version.LT(semver.MustParse("1.13.0")) || driver.IsKIC(cfg.Driver) {
glog.Info("ignoring SystemVerification for kubeadm because of either driver or kubernetes version")
if driver.IsKIC(cfg.Driver) {
glog.Infof("ignoring SystemVerification for kubeadm because of %s driver", cfg.Driver)
skipSystemVerification = true
}
if skipSystemVerification {
ignore = append(ignore, "SystemVerification")
}
@ -335,8 +348,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
return nil
}
out.T(out.HealthCheck, "Verifying Kubernetes Components:")
out.T(out.IndentVerify, "verifying node conditions ...")
out.T(out.HealthCheck, "Verifying Kubernetes components...")
// TODO: #7706: for better performance we could use k.client inside minikube to avoid asking for external IP:PORT
hostname, _, port, err := driver.ControlPaneEndpoint(&cfg, &n, cfg.Driver)
@ -366,7 +378,6 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
}
if cfg.VerifyComponents[kverify.APIServerWaitKey] {
out.T(out.IndentVerify, "verifying api server ...")
client, err := k.client(hostname, port)
if err != nil {
return errors.Wrap(err, "get k8s client")
@ -381,7 +392,6 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
}
if cfg.VerifyComponents[kverify.SystemPodsWaitKey] {
out.T(out.IndentVerify, "verifying system pods ...")
client, err := k.client(hostname, port)
if err != nil {
return errors.Wrap(err, "get k8s client")
@ -392,7 +402,6 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
}
if cfg.VerifyComponents[kverify.DefaultSAWaitKey] {
out.T(out.IndentVerify, "verifying default service account ...")
client, err := k.client(hostname, port)
if err != nil {
return errors.Wrap(err, "get k8s client")
@ -403,7 +412,6 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
}
if cfg.VerifyComponents[kverify.AppsRunningKey] {
out.T(out.IndentVerify, "verifying apps running ...")
client, err := k.client(hostname, port)
if err != nil {
return errors.Wrap(err, "get k8s client")
@ -414,7 +422,6 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
}
if cfg.VerifyComponents[kverify.NodeReadyKey] {
out.T(out.IndentVerify, "verifying node ready")
client, err := k.client(hostname, port)
if err != nil {
return errors.Wrap(err, "get k8s client")
@ -609,14 +616,24 @@ func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) {
// DeleteCluster removes the components that were started earlier
func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error {
cr, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: k.c, Socket: k8s.CRISocket})
if err != nil {
return errors.Wrap(err, "runtime")
}
version, err := util.ParseKubernetesVersion(k8s.KubernetesVersion)
if err != nil {
return errors.Wrap(err, "parsing kubernetes version")
}
cmd := fmt.Sprintf("%s reset --force", bsutil.InvokeKubeadm(k8s.KubernetesVersion))
ka := bsutil.InvokeKubeadm(k8s.KubernetesVersion)
sp := cr.SocketPath()
if sp == "" {
sp = kconst.DefaultDockerCRISocket
}
cmd := fmt.Sprintf("%s reset --cri-socket %s --force", ka, sp)
if version.LT(semver.MustParse("1.11.0")) {
cmd = fmt.Sprintf("%s reset", bsutil.InvokeKubeadm(k8s.KubernetesVersion))
cmd = fmt.Sprintf("%s reset --cri-socket %s", ka, sp)
}
rr, derr := k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd))
@ -628,11 +645,6 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error {
glog.Warningf("stop kubelet: %v", err)
}
cr, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: k.c, Socket: k8s.CRISocket})
if err != nil {
return errors.Wrap(err, "runtime")
}
containers, err := cr.ListContainers(cruntime.ListOptions{Namespaces: []string{"kube-system"}})
if err != nil {
glog.Warningf("unable to list kube-system containers: %v", err)
@ -786,6 +798,9 @@ func startKubeletIfRequired(runner command.Runner, sm sysinit.Manager) error {
return errors.Wrap(err, "starting kubelet")
}
if err := sm.Enable("kubelet"); err != nil {
return err
}
return sm.Start("kubelet")
}

View File

@ -70,19 +70,21 @@ type ClusterConfig struct {
// KubernetesConfig contains the parameters used to configure the VM Kubernetes.
type KubernetesConfig struct {
KubernetesVersion string
ClusterName string
APIServerName string
APIServerNames []string
APIServerIPs []net.IP
DNSDomain string
ContainerRuntime string
CRISocket string
NetworkPlugin string
FeatureGates string // https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/
ServiceCIDR string // the subnet which kubernetes services will be deployed to
ImageRepository string
ExtraOptions ExtraOptionSlice
KubernetesVersion string
ClusterName string
APIServerName string
APIServerNames []string
APIServerIPs []net.IP
DNSDomain string
ContainerRuntime string
CRISocket string
NetworkPlugin string
FeatureGates string // https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/
ServiceCIDR string // the subnet which kubernetes services will be deployed to
ImageRepository string
LoadBalancerStartIP string // currently only used by MetalLB addon
LoadBalancerEndIP string // currently only used by MetalLB addon
ExtraOptions ExtraOptionSlice
ShouldLoadCachedImages bool
EnableDefaultCNI bool
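Purely as an illustration of where the new fields sit (the values are made up; per the comments above, only the MetalLB addon consumes them today):

cfg := config.KubernetesConfig{
	KubernetesVersion:   "v1.18.0",
	ContainerRuntime:    "docker",
	LoadBalancerStartIP: "192.168.49.100",
	LoadBalancerEndIP:   "192.168.49.120",
}
_ = cfg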

Some files were not shown because too many files have changed in this diff.