Merge branch 'master' into ilyaz/test_deb_install

pull/10464/head
Ilya Zuyev 2021-02-19 18:40:57 -08:00
commit 251f25637d
70 changed files with 1517 additions and 474 deletions

.github/dependabot.yml (new file)

@ -0,0 +1,7 @@
---
version: 2
updates:
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "weekly"


@ -82,7 +82,7 @@ jobs:
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
sudo apt-get install -y jq
- name: Run Integration Test
@ -110,7 +110,7 @@ jobs:
cd out
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')


@ -1,121 +0,0 @@
name: KIC_IMAGE
on:
pull_request:
paths:
- "deploy/kicbase/**"
env:
GOPROXY: https://proxy.golang.org
jobs:
build_test_kic_image:
runs-on: [self-hosted, debian9, baremetal, equinix]
steps:
- name: Clean up
shell: bash
run: |
pwd
ls -lah
rm -rf out
ls -lah
df -h
sudo rm -f /etc/cron.hourly/cleanup_and_reboot || true
- uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
go-version: '1.15.5'
stable: true
- name: Download Dependencies
run: go mod download
- name: Build Binaries
run: |
sudo apt-get update
sudo apt-get install -y make build-essential
make linux
make e2e-linux-amd64
cp -r test/integration/testdata ./out
whoami
echo github ref $GITHUB_REF
echo workflow $GITHUB_WORKFLOW
echo home $HOME
echo event name $GITHUB_EVENT_NAME
echo workspace $GITHUB_WORKSPACE
echo "end of debug stuff"
echo $(which jq)
- name: Build Image
run: |
docker images
make kic-base-image
docker images
- name: Info
shell: bash
run: |
hostname
uname -r
lsb_release -a
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
sudo apt-get install -y jq
rm -f gopogh-linux-amd64 || true
- name: Run Integration Test
continue-on-error: false
# bash {0} to allow test to continue to next step. in case of
shell: bash {0}
run: |
KIC_VERSION=$(egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f2)
KIC_IMG_HEAD="local/kicbase:${KIC_VERSION}-snapshot"
cd out
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args="--vm-driver=docker --base-image=${KIC_IMG_HEAD}" -test.v -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
- name: Generate HTML Report
shell: bash
run: |
cd out
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(/usr/local/bin/gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail' || true)
TestsNum=$(echo $STAT | jq '.NumberOfTests' || true)
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
echo 'STAT<<EOF' >> $GITHUB_ENV
echo "${STAT}" >> $GITHUB_ENV
echo 'EOF' >> $GITHUB_ENV
- uses: actions/upload-artifact@v1
with:
name: kic_image_functional_test_docker_ubuntu
path: out/report
- name: The End Result build_test_kic_image_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*******************${numPass} Passes :) *******************"
echo $STAT | jq '.PassedTests' || true
echo "*******************************************************"
echo "---------------- ${numFail} Failures :( ----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
if [ "$numPass" -eq 0 ];then echo "*** 0 Passed! ***";exit 2;fi
if [ "$numPass" -eq 0 ];then echo "*** Passed! ***";exit 0;fi


@ -120,7 +120,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -152,7 +152,7 @@ jobs:
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
@ -205,7 +205,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Install docker
shell: bash
@ -250,7 +250,7 @@ jobs:
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
@ -350,7 +350,7 @@ jobs:
continue-on-error: true
shell: powershell
run: |
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
choco install -y kubernetes-cli
choco install -y jq
choco install -y caffeine
@ -487,7 +487,7 @@ jobs:
shell: powershell
run: |
$ErrorActionPreference = "SilentlyContinue"
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
choco install -y kubernetes-cli
choco install -y jq
choco install -y caffeine
@ -592,7 +592,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -622,7 +622,7 @@ jobs:
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
@ -862,7 +862,7 @@ jobs:
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-arm64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-arm64
sudo install gopogh-linux-arm64 /usr/local/bin/gopogh
- name: Install tools
@ -925,7 +925,7 @@ jobs:
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
@ -996,7 +996,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -1026,7 +1026,7 @@ jobs:
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
@ -1078,7 +1078,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Install docker
shell: bash
@ -1124,7 +1124,7 @@ jobs:
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
@ -1190,7 +1190,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -1222,7 +1222,7 @@ jobs:
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
@ -1274,7 +1274,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -1314,7 +1314,7 @@ jobs:
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
@ -1381,7 +1381,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -1411,7 +1411,7 @@ jobs:
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
@ -1463,7 +1463,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -1503,7 +1503,7 @@ jobs:
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')


@ -118,7 +118,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -150,7 +150,7 @@ jobs:
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
@ -203,7 +203,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Install docker
shell: bash
@ -248,7 +248,7 @@ jobs:
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
@ -348,7 +348,7 @@ jobs:
continue-on-error: true
shell: powershell
run: |
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
choco install -y kubernetes-cli
choco install -y jq
choco install -y caffeine
@ -485,7 +485,7 @@ jobs:
shell: powershell
run: |
$ErrorActionPreference = "SilentlyContinue"
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
choco install -y kubernetes-cli
choco install -y jq
choco install -y caffeine
@ -782,7 +782,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -812,7 +812,7 @@ jobs:
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
@ -861,7 +861,7 @@ jobs:
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-arm64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-arm64
sudo install gopogh-linux-arm64 /usr/local/bin/gopogh
- name: Install tools
@ -924,7 +924,7 @@ jobs:
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
@ -993,7 +993,7 @@ jobs:
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -1023,7 +1023,7 @@ jobs:
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
@ -1075,7 +1075,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Install docker
shell: bash
@ -1121,7 +1121,7 @@ jobs:
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
@ -1187,7 +1187,7 @@ jobs:
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -1219,7 +1219,7 @@ jobs:
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
@ -1271,7 +1271,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -1311,7 +1311,7 @@ jobs:
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
@ -1378,7 +1378,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -1408,7 +1408,7 @@ jobs:
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
@ -1460,7 +1460,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -1500,7 +1500,7 @@ jobs:
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
STAT=$(gopogh -in ./report/testout.json -out_html ./report/testout.html -out_summary ./report/testout_summary.json -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')


@ -51,7 +51,7 @@ HYPERKIT_BUILD_IMAGE ?= karalabe/xgo-1.12.x
#
# TODO: See https://github.com/kubernetes/minikube/issues/10276
#BUILD_IMAGE ?= us.gcr.io/k8s-artifacts-prod/build-image/kube-cross:v$(GO_VERSION)-1
BUILD_IMAGE ?= golang:1.16-rc-buster
BUILD_IMAGE ?= golang:1.16.0-buster
#
ISO_BUILD_IMAGE ?= $(REGISTRY)/buildroot-image
@ -645,7 +645,7 @@ X_BUILD_ENV ?= DOCKER_CLI_EXPERIMENTAL=enabled
docker-multi-arch-builder:
env $(X_BUILD_ENV) docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
env $(X_BUILD_ENV) docker buildx rm --builder $(X_DOCKER_BUILDER) || true
env $(X_BUILD_ENV) docker buildx create --name kicbase-builder --buildkitd-flags '--debug' --use || true
env $(X_BUILD_ENV) docker buildx create --name $(X_DOCKER_BUILDER) --buildkitd-flags '--debug' || true
KICBASE_ARCH = linux/arm64,linux/amd64
KICBASE_IMAGE_REGISTRIES ?= $(REGISTRY)/kicbase:$(KIC_VERSION) $(REGISTRY_GH)/kicbase:$(KIC_VERSION) kicbase/stable:$(KIC_VERSION)
@ -662,7 +662,7 @@ endif
ifndef AUTOPUSH
$(call user_confirm, 'Are you sure you want to push $(KICBASE_IMAGE_REGISTRIES) ?')
endif
env $(X_BUILD_ENV) docker buildx build --platform $(KICBASE_ARCH) $(addprefix -t ,$(KICBASE_IMAGE_REGISTRIES)) --push --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) ./deploy/kicbase
env $(X_BUILD_ENV) docker buildx build --builder $(X_DOCKER_BUILDER) --platform $(KICBASE_ARCH) $(addprefix -t ,$(KICBASE_IMAGE_REGISTRIES)) --push --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) ./deploy/kicbase
.PHONY: upload-preloaded-images-tar
upload-preloaded-images-tar: out/minikube # Upload the preloaded images for oldest supported, newest supported, and default kubernetes versions to GCS.
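Not part of this diff — a minimal usage sketch of the multi-arch targets touched above, assuming a local Docker with buildx available and push access to the registries listed in KICBASE_IMAGE_REGISTRIES:

# Sketch only: create the dedicated buildx builder once, then build the
# linux/arm64 + linux/amd64 kicbase image and push it, skipping the
# interactive confirmation by defining AUTOPUSH.
make docker-multi-arch-builder
env AUTOPUSH=1 make push-kic-base-image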


@ -1,4 +1,4 @@
// +build darwin
// +build darwin,!arm64
/*
Copyright 2016 The Kubernetes Authors All rights reserved.


@ -28,6 +28,7 @@ import (
"os/user"
"regexp"
"runtime"
"strconv"
"strings"
"github.com/blang/semver"
@ -75,7 +76,7 @@ var (
insecureRegistry []string
apiServerNames []string
apiServerIPs []net.IP
hostRe = regexp.MustCompile(`[\w\.-]+`)
hostRe = regexp.MustCompile(`^[^-][\w\.-]+$`)
)
func init() {
@ -563,7 +564,7 @@ func selectDriver(existing *config.ClusterConfig) (registry.DriverState, []regis
}
ds := driver.Status(d)
if ds.Name == "" {
exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS})
exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}", out.V{"driver": d, "os": runtime.GOOS, "arch": runtime.GOARCH})
}
out.Step(style.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()})
return ds, nil, true
@ -573,7 +574,7 @@ func selectDriver(existing *config.ClusterConfig) (registry.DriverState, []regis
if d := viper.GetString("vm-driver"); d != "" {
ds := driver.Status(viper.GetString("vm-driver"))
if ds.Name == "" {
exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS})
exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}", out.V{"driver": d, "os": runtime.GOOS, "arch": runtime.GOARCH})
}
out.Step(style.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()})
return ds, nil, true
@ -712,7 +713,7 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
name := ds.Name
klog.Infof("validating driver %q against %+v", name, existing)
if !driver.Supported(name) {
exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS})
exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}", out.V{"driver": name, "os": runtime.GOOS, "arch": runtime.GOARCH})
}
// if we are only downloading artifacts for a driver, we can stop validation here
@ -985,7 +986,7 @@ func validateRequestedMemorySize(req int, drvName string) {
}
}
// validateCPUCount validates the cpu count matches the minimum recommended
// validateCPUCount validates that the cpu count meets the minimum recommended and does not exceed the available cpu count
func validateCPUCount(drvName string) {
var cpuCount int
if driver.BareMetal(drvName) {
@ -1019,6 +1020,22 @@ func validateCPUCount(drvName string) {
}
if si.CPUs < cpuCount {
if driver.IsDockerDesktop(drvName) {
out.Step(style.Empty, `- Ensure your {{.driver_name}} daemon has access to enough CPU/memory resources.`, out.V{"driver_name": drvName})
if runtime.GOOS == "darwin" {
out.Step(style.Empty, `- Docs https://docs.docker.com/docker-for-mac/#resources`, out.V{"driver_name": drvName})
}
if runtime.GOOS == "windows" {
out.String("\n\t")
out.Step(style.Empty, `- Docs https://docs.docker.com/docker-for-windows/#resources`, out.V{"driver_name": drvName})
}
}
exitIfNotForced(reason.RsrcInsufficientCores, "Requested cpu count {{.requested_cpus}} is greater than the available cpus of {{.avail_cpus}}", out.V{"requested_cpus": cpuCount, "avail_cpus": si.CPUs})
}
// looks good
if si.CPUs >= 2 {
return
@ -1170,28 +1187,37 @@ func validateRegistryMirror() {
}
// This function validates that the --insecure-registry follows one of the following formats:
// "<ip>:<port>" "<hostname>:<port>" "<network>/<netmask>"
// "<ip>[:<port>]" "<hostname>[:<port>]" "<network>/<netmask>"
func validateInsecureRegistry() {
if len(insecureRegistry) > 0 {
for _, addr := range insecureRegistry {
// Remove http or https from registryMirror
if strings.HasPrefix(strings.ToLower(addr), "http://") || strings.HasPrefix(strings.ToLower(addr), "https://") {
i := strings.Index(addr, "//")
addr = addr[i+2:]
} else if strings.Contains(addr, "://") || strings.HasSuffix(addr, ":") {
exit.Message(reason.Usage, "Sorry, the address provided with the --insecure-registry flag is invalid: {{.addr}}. Expected formats are: <ip>[:<port>], <hostname>[:<port>] or <network>/<netmask>", out.V{"addr": addr})
}
hostnameOrIP, port, err := net.SplitHostPort(addr)
if err != nil {
_, _, err := net.ParseCIDR(addr)
if err == nil {
continue
}
hostnameOrIP = addr
}
if port == "" {
exit.Message(reason.Usage, "Sorry, the address provided with the --insecure-registry flag is invalid: {{.addr}}. Expected formats are: <ip>:<port>, <hostname>:<port> or <network>/<netmask>", out.V{"addr": addr})
if !hostRe.MatchString(hostnameOrIP) && net.ParseIP(hostnameOrIP) == nil {
// fmt.Printf("This is not hostname or ip %s", hostnameOrIP)
exit.Message(reason.Usage, "Sorry, the address provided with the --insecure-registry flag is invalid: {{.addr}}. Expected formats are: <ip>[:<port>], <hostname>[:<port>] or <network>/<netmask>", out.V{"addr": addr})
}
// checks both IPv4 and IPv6
ipAddr := net.ParseIP(hostnameOrIP)
if ipAddr != nil {
continue
}
isValidHost := hostRe.MatchString(hostnameOrIP)
if err != nil || !isValidHost {
exit.Message(reason.Usage, "Sorry, the address provided with the --insecure-registry flag is invalid: {{.addr}}. Expected formats are: <ip>:<port>, <hostname>:<port> or <network>/<netmask>", out.V{"addr": addr})
if port != "" {
v, err := strconv.Atoi(port)
if err != nil {
exit.Message(reason.Usage, "Sorry, the address provided with the --insecure-registry flag is invalid: {{.addr}}. Expected formats are: <ip>[:<port>], <hostname>[:<port>] or <network>/<netmask>", out.V{"addr": addr})
}
if v < 0 || v > 65535 {
exit.Message(reason.Usage, "Sorry, the address provided with the --insecure-registry flag is invalid: {{.addr}}. Expected formats are: <ip>[:<port>], <hostname>[:<port>] or <network>/<netmask>", out.V{"addr": addr})
}
}
}
}
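For illustration only (not part of this change set) — the address formats the reworked validation above is meant to accept, assuming an ordinary minikube start invocation:

# Sketch: --insecure-registry values matching the documented formats.
minikube start --insecure-registry "10.0.0.0/24"          # <network>/<netmask>
minikube start --insecure-registry "registry.test:5000"   # <hostname>:<port>
minikube start --insecure-registry "192.168.39.100"       # bare <ip>; the port is now optional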


@ -19,3 +19,5 @@ sha256 25dc558fbabc987bd58c7eab5230121b258a7b0eb34a49dc6595f1c6f3969116 v1.18.2.
sha256 d5c6442e3990938badc966cdd1eb9ebe2fc11345452c233aa0d87ca38fbeed81 v1.18.3.tar.gz
sha256 74a4e916acddc6cf47ab5752bdebb6732ce2c028505ef57b7edc21d2da9039b6 v1.18.4.tar.gz
sha256 fc8a8e61375e3ce30563eeb0fd6534c4f48fc20300a72e6ff51cc99cb2703516 v1.19.0.tar.gz
sha256 6165c5b8212ea03be2a465403177318bfe25a54c3e8d66d720344643913a0223 v1.19.1.tar.gz
sha256 76fd7543bc92d4364a11060f43a5131893a76c6e6e9d6de3a6bb6292c110b631 v1.20.0.tar.gz


@ -4,8 +4,8 @@
#
################################################################################
CRIO_BIN_VERSION = v1.19.0
CRIO_BIN_COMMIT = 99c925bebdd9e392f2d575e25f2e6a1082e6c232
CRIO_BIN_VERSION = v1.20.0
CRIO_BIN_COMMIT = d388528dbed26b93c5bc1c89623607a1e597aa57
CRIO_BIN_SITE = https://github.com/cri-o/cri-o/archive
CRIO_BIN_SOURCE = $(CRIO_BIN_VERSION).tar.gz
CRIO_BIN_DEPENDENCIES = host-go libgpgme


@ -29,6 +29,7 @@ storage_driver = "overlay"
# List to pass options to the storage driver. Please refer to
# containers-storage.conf(5) to see all available storage options.
#storage_option = [
# "overlay.mountopt=nodev,metacopy=on",
#]
# The default log directory where all logs will go unless directly specified by
@ -92,11 +93,6 @@ grpc_max_recv_msg_size = 16777216
#default_ulimits = [
#]
# default_runtime is the _name_ of the OCI runtime to be used as the default.
# The name is matched against the runtimes map below. If this value is changed,
# the corresponding existing entry from the runtimes map below will be ignored.
default_runtime = "runc"
# If true, the runtime will not use pivot_root, but instead use MS_MOVE.
no_pivot = false
@ -131,6 +127,12 @@ selinux = false
# will be used. This option supports live configuration reload.
seccomp_profile = ""
# Changes the meaning of an empty seccomp profile. By default
# (and according to CRI spec), an empty profile means unconfined.
# This option tells CRI-O to treat an empty profile as the default profile,
# which might increase security.
seccomp_use_default_when_empty = false
# Used to change the name of the default AppArmor profile of CRI-O. The default
# profile name is "crio-default". This profile only takes effect if the user
# does not specify a profile via the Kubernetes Pod's metadata annotation. If
@ -141,6 +143,9 @@ apparmor_profile = "crio-default"
# Cgroup management implementation used for the runtime.
cgroup_manager = "systemd"
# Specify whether the image pull must be performed in a separate cgroup.
separate_pull_cgroup = ""
# List of default capabilities for containers. If it is empty or commented out,
# only the capabilities defined in the containers json file by the user/kube
# will be added.
@ -174,11 +179,6 @@ hooks_dir = [
"/usr/share/containers/oci/hooks.d",
]
# List of default mounts for each container. **Deprecated:** this option will
# be removed in future versions in favor of default_mounts_file.
default_mounts = [
]
# Path to the file specifying the defaults mounts for each container. The
# format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads
# its default mounts from the following two files:
@ -243,7 +243,8 @@ gid_mappings = ""
ctr_stop_timeout = 30
# manage_ns_lifecycle determines whether we pin and remove namespaces
# and manage their lifecycle
# and manage their lifecycle.
# This option is being deprecated, and will be unconditionally true in the future.
manage_ns_lifecycle = true
# drop_infra_ctr determines whether CRI-O drops the infra container
@ -259,6 +260,11 @@ namespaces_dir = "/var/run"
# pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle
pinns_path = "/usr/bin/pinns"
# default_runtime is the _name_ of the OCI runtime to be used as the default.
# The name is matched against the runtimes map below. If this value is changed,
# the corresponding existing entry from the runtimes map below will be ignored.
default_runtime = "runc"
# The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes.
# The runtime to use is picked based on the runtime_handler provided by the CRI.
# If no runtime_handler is provided, the runtime will be picked based on the level
@ -268,7 +274,8 @@ pinns_path = "/usr/bin/pinns"
# runtime_path = "/path/to/the/executable"
# runtime_type = "oci"
# runtime_root = "/path/to/the/root"
#
# privileged_without_host_devices = false
# allowed_annotations = []
# Where:
# - runtime-handler: name used to identify the runtime
# - runtime_path (optional, string): absolute path to the runtime executable in
@ -279,6 +286,14 @@ pinns_path = "/usr/bin/pinns"
# omitted, an "oci" runtime is assumed.
# - runtime_root (optional, string): root directory for storage of containers
# state.
# - privileged_without_host_devices (optional, bool): an option for restricting
# host devices from being passed to privileged containers.
# - allowed_annotations (optional, array of strings): an option for specifying
# a list of experimental annotations that this runtime handler is allowed to process.
# The currently recognized values are:
# "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod.
# "io.kubernetes.cri-o.Devices" for configuring devices for the pod.
# "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm.
[crio.runtime.runtimes.runc]
@ -287,6 +302,8 @@ runtime_type = "oci"
runtime_root = "/run/runc"
# crun is a fast and lightweight fully featured OCI runtime and C library for
# running containers
#[crio.runtime.runtimes.crun]


@ -29,6 +29,7 @@
# List to pass options to the storage driver. Please refer to
# containers-storage.conf(5) to see all available storage options.
#storage_option = [
# "overlay.mountopt=nodev,metacopy=on",
#]
# The default log directory where all logs will go unless directly specified by
@ -92,11 +93,6 @@ grpc_max_recv_msg_size = 16777216
#default_ulimits = [
#]
# default_runtime is the _name_ of the OCI runtime to be used as the default.
# The name is matched against the runtimes map below. If this value is changed,
# the corresponding existing entry from the runtimes map below will be ignored.
default_runtime = "runc"
# If true, the runtime will not use pivot_root, but instead use MS_MOVE.
no_pivot = false
@ -131,6 +127,12 @@ selinux = false
# will be used. This option supports live configuration reload.
seccomp_profile = ""
# Changes the meaning of an empty seccomp profile. By default
# (and according to CRI spec), an empty profile means unconfined.
# This option tells CRI-O to treat an empty profile as the default profile,
# which might increase security.
seccomp_use_default_when_empty = false
# Used to change the name of the default AppArmor profile of CRI-O. The default
# profile name is "crio-default". This profile only takes effect if the user
# does not specify a profile via the Kubernetes Pod's metadata annotation. If
@ -141,6 +143,9 @@ apparmor_profile = "crio-default"
# Cgroup management implementation used for the runtime.
cgroup_manager = "systemd"
# Specify whether the image pull must be performed in a separate cgroup.
separate_pull_cgroup = ""
# List of default capabilities for containers. If it is empty or commented out,
# only the capabilities defined in the containers json file by the user/kube
# will be added.
@ -174,11 +179,6 @@ hooks_dir = [
"/usr/share/containers/oci/hooks.d",
]
# List of default mounts for each container. **Deprecated:** this option will
# be removed in future versions in favor of default_mounts_file.
default_mounts = [
]
# Path to the file specifying the defaults mounts for each container. The
# format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads
# its default mounts from the following two files:
@ -243,7 +243,8 @@ gid_mappings = ""
ctr_stop_timeout = 30
# manage_ns_lifecycle determines whether we pin and remove namespaces
# and manage their lifecycle
# and manage their lifecycle.
# This option is being deprecated, and will be unconditionally true in the future.
manage_ns_lifecycle = true
# drop_infra_ctr determines whether CRI-O drops the infra container
@ -259,6 +260,11 @@ namespaces_dir = "/var/run"
# pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle
pinns_path = ""
# default_runtime is the _name_ of the OCI runtime to be used as the default.
# The name is matched against the runtimes map below. If this value is changed,
# the corresponding existing entry from the runtimes map below will be ignored.
default_runtime = "runc"
# The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes.
# The runtime to use is picked based on the runtime_handler provided by the CRI.
# If no runtime_handler is provided, the runtime will be picked based on the level
@ -268,7 +274,8 @@ pinns_path = ""
# runtime_path = "/path/to/the/executable"
# runtime_type = "oci"
# runtime_root = "/path/to/the/root"
#
# privileged_without_host_devices = false
# allowed_annotations = []
# Where:
# - runtime-handler: name used to identify the runtime
# - runtime_path (optional, string): absolute path to the runtime executable in
@ -279,6 +286,14 @@ pinns_path = ""
# omitted, an "oci" runtime is assumed.
# - runtime_root (optional, string): root directory for storage of containers
# state.
# - privileged_without_host_devices (optional, bool): an option for restricting
# host devices from being passed to privileged containers.
# - allowed_annotations (optional, array of strings): an option for specifying
# a list of experimental annotations that this runtime handler is allowed to process.
# The currently recognized values are:
# "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod.
# "io.kubernetes.cri-o.Devices" for configuring devices for the pod.
# "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm.
[crio.runtime.runtimes.runc]
@ -287,6 +302,8 @@ runtime_type = "oci"
runtime_root = "/run/runc"
# crun is a fast and lightweight fully featured OCI runtime and C library for
# running containers
#[crio.runtime.runtimes.crun]


@ -1,4 +1,4 @@
sha256 a16846fe076aaf2c9ea2e854c3baba9fb838d916be7fb4b5be332e6c92d907d4 v1.9.3.tar.gz
sha256 5ebaa6e0dbd7fd1863f70d2bc71dc8a94e195c3339c17e3cac4560c9ec5747f8 v2.1.1.tar.gz
sha256 ec5473e51fa28f29af323473fc484f742dc7df23d06d8ba9f217f13382893a71 v2.2.0.tar.gz
sha256 bd86b181251e2308cb52f18410fb52d89df7f130cecf0298bbf9a848fe7daf60 v2.2.1.tar.gz
sha256 3212bad60d945c1169b27da03959f36d92d1d8964645c701a5a82a89118e96d1 v2.2.1.tar.gz


@ -19,7 +19,7 @@
# start from ubuntu 20.04, this image is reasonably small as a starting point
# for a kubernetes node image, it doesn't contain much we don't need
FROM ubuntu:focal-20201106
FROM ubuntu:focal-20210119
ARG BUILDKIT_VERSION="v0.8.1"
@ -46,7 +46,7 @@ COPY entrypoint /usr/local/bin/entrypoint
# - disabling kmsg in journald (these log entries would be confusing)
#
# Next we ensure the /etc/kubernetes/manifests directory exists. Normally
# a kubeadm debain / rpm package would ensure that this exists but we install
# a kubeadm debian / rpm package would ensure that this exists but we install
# freshly built binaries directly when we build the node image.
#
# Finally we adjust tempfiles cleanup to be 1 minute after "boot" instead of 15m
@ -74,6 +74,8 @@ RUN echo "Ensuring scripts are executable ..." \
&& mkdir -p /etc/kubernetes/manifests \
&& echo "Adjusting systemd-tmpfiles timer" \
&& sed -i /usr/lib/systemd/system/systemd-tmpfiles-clean.timer -e 's#OnBootSec=.*#OnBootSec=1min#' \
&& echo "Disabling udev" \
&& systemctl disable udev.service \
&& echo "Modifying /etc/nsswitch.conf to prefer hosts" \
&& sed -i /etc/nsswitch.conf -re 's#^(hosts:\s*).*#\1dns files#'
@ -87,7 +89,7 @@ STOPSIGNAL SIGRTMIN+3
ENTRYPOINT [ "/usr/local/bin/entrypoint", "/sbin/init" ]
ARG COMMIT_SHA
# using base image created by kind https://github.com/kubernetes-sigs/kind/blob/2c0eee40/images/base/Dockerfile
# using base image created by kind https://github.com/kubernetes-sigs/kind/blob/1da0c5e6/images/base/Dockerfile
# which is an ubuntu 20.04 with an entry-point that helps running systemd
# could be changed to any debian that can run systemd
USER root
@ -187,5 +189,5 @@ RUN mkdir -p /kind
RUN rm -rf \
/usr/share/doc/* \
/usr/share/man/* \
/usr/share/local/* \
/usr/share/local/*
RUN echo "kic! Build: ${COMMIT_SHA} Time :$(date)" > "/kic.txt"


@ -19,6 +19,13 @@ set -o nounset
set -o pipefail
set -x
configure_containerd() {
# we need to switch to the 'native' snapshotter on zfs
if [[ "$(stat -f -c %T /kind)" == 'zfs' ]]; then
sed -i 's/snapshotter = "overlayfs"/snapshotter = "native"/' /etc/containerd/config.toml
fi
}
configure_proxy() {
# ensure all processes receive the proxy settings by default
# https://www.freedesktop.org/software/systemd/man/systemd-system.conf.html
@ -78,8 +85,54 @@ fix_mount() {
mount --make-rshared /
}
fix_cgroup_mounts() {
# helper used by fix_cgroup
mount_kubelet_cgroup_root() {
local cgroup_root=$1
local subsystem=$2
if [ -z "${cgroup_root}" ]; then
return 0
fi
mkdir -p "${subsystem}/${cgroup_root}"
if [ "${subsystem}" == "/sys/fs/cgroup/cpuset" ]; then
# This is needed. Otherwise, assigning process to the cgroup
# (or any nested cgroup) would result in ENOSPC.
cat "${subsystem}/cpuset.cpus" > "${subsystem}/${cgroup_root}/cpuset.cpus"
cat "${subsystem}/cpuset.mems" > "${subsystem}/${cgroup_root}/cpuset.mems"
fi
# We need to perform a self bind mount here because otherwise,
# systemd might delete the cgroup unintentionally before the
# kubelet starts.
mount --bind "${subsystem}/${cgroup_root}" "${subsystem}/${cgroup_root}"
}
fix_cgroup() {
if [[ -f "/sys/fs/cgroup/cgroup.controllers" ]]; then
echo 'INFO: detected cgroup v2'
# Both Docker and Podman enable CgroupNS on cgroup v2 hosts by default.
#
# So mostly we do not need to mess around with the cgroup path stuff,
# however, we still need to create the "/kubelet" cgroup at least.
# (Otherwise kubelet fails with `cgroup-root ["kubelet"] doesn't exist` error, see #1969)
#
# The "/kubelet" cgroup is created in ExecStartPre of the kubeadm service.
#
# [FAQ: Why not create "/kubelet" cgroup here?]
# We can't create the cgroup with controllers here, because /sys/fs/cgroup/cgroup.subtree_control is empty.
# And yet we can't write controllers to /sys/fs/cgroup/cgroup.subtree_control by ourselves either, because
# /sys/fs/cgroup/cgroup.procs is not empty at this moment.
#
# After switching from this entrypoint script to systemd, systemd evacuates the processes in the root
# group to "/init.scope" group, so we can write the root subtree_control and create "/kubelet" cgroup.
return
fi
echo 'INFO: detected cgroup v1'
echo 'INFO: fix cgroup mounts for all subsystems'
# see: https://d2iq.com/blog/running-kind-inside-a-kubernetes-cluster-for-continuous-integration
# capture initial state before modifying
local current_cgroup
current_cgroup=$(grep systemd /proc/self/cgroup | cut -d: -f3)
local cgroup_subsystems
cgroup_subsystems=$(findmnt -lun -o source,target -t cgroup | grep "${current_cgroup}" | awk '{print $2}')
# For each cgroup subsystem, Docker does a bind mount from the current
# cgroup to the root of the cgroup subsystem. For instance:
# /sys/fs/cgroup/memory/docker/<cid> -> /sys/fs/cgroup/memory
@ -96,6 +149,7 @@ fix_cgroup_mounts() {
# This regexp finds all /sys/fs/cgroup mounts that are cgroupfs and mounted somewhere other than / - extracting fields 4+
# See https://man7.org/linux/man-pages/man5/proc.5.html for field names
# xref: https://github.com/kubernetes/minikube/pull/9508
# Example inputs:
#
# Docker: /docker/562a56986a84b3cd38d6a32ac43fdfcc8ad4d2473acf2839cbf549273f35c206 /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:143 master:23 - cgroup devices rw,devices
@ -120,11 +174,20 @@ fix_cgroup_mounts() {
fi
done
fi
# kubelet will try to manage cgroups / pods that are not owned by it when
# "nesting" clusters, unless we instruct it to use a different cgroup root.
# We do this, and when doing so we must fixup this alternative root
# currently this is hardcoded to be /kubelet
mount --make-rprivate /sys/fs/cgroup
echo "${cgroup_subsystems}" |
while IFS= read -r subsystem; do
mount_kubelet_cgroup_root "/kubelet" "${subsystem}"
done
}
retryable_fix_cgroup_mounts() {
retryable_fix_cgroup() {
for i in $(seq 0 10); do
fix_cgroup_mounts && return || echo "fix_cgroup failed with exit code $? (retry $i)"
fix_cgroup && return || echo "fix_cgroup failed with exit code $? (retry $i)"
echo "fix_cgroup diagnostics information below:"
mount
sleep 1
@ -273,10 +336,11 @@ enable_network_magic(){
# run pre-init fixups
# NOTE: it's important that we do configure* first in this order to avoid races
configure_containerd
configure_proxy
fix_kmsg
fix_mount
retryable_fix_cgroup_mounts
retryable_fix_cgroup
fix_machine_id
fix_product_name
fix_product_uuid


@ -0,0 +1,41 @@
# Standard Linux Distribution
* First proposed: 2020-12-17
* Authors: Anders F Björklund (@afbjorklund)
## Reviewer Priorities
Please review this proposal with the following priorities:
* Does this fit with minikube's [principles](https://minikube.sigs.k8s.io/docs/concepts/principles/)?
* Are there other approaches to consider?
* Could the implementation be made simpler?
* Are there usability, reliability, or technical debt concerns?
Please leave the above text in your proposal as instructions to the reader.
## Summary
Change the distribution (OS) for the minikube ISO, from Buildroot to Ubuntu.
## Goals
* Use one of the supported Kubernetes OS, like Ubuntu 20.04
* Use the same operating system for KIC base and ISO image
## Non-Goals
* Making major changes to the new standard operating system
* Support production deployments, still intended for learning
## Design Details
Use external system image and external packages, same as for KIC image.
Keep both images available (one being default), during transition period.
## Alternatives Considered
Continue to support custom distro, instead of using a standard distro.
Make current Buildroot OS into standard supported Kubernetes distribution.


@ -290,6 +290,7 @@ fi
readonly TEST_OUT="${TEST_HOME}/testout.txt"
readonly JSON_OUT="${TEST_HOME}/test.json"
readonly HTML_OUT="${TEST_HOME}/test.html"
readonly SUMMARY_OUT="${TEST_HOME}/test_summary.json"
e2e_start_time="$(date -u +%s)"
echo ""
@ -360,9 +361,9 @@ fi
echo ">> Installing gopogh"
if [ "$(uname)" != "Darwin" ]; then
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64 && sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64 && sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
else
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-darwin-amd64 && sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-darwin-amd64 && sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
fi
echo ">> Running gopogh"
@ -371,7 +372,8 @@ if test -f "${HTML_OUT}"; then
fi
touch "${HTML_OUT}"
gopogh_status=$(gopogh -in "${JSON_OUT}" -out "${HTML_OUT}" -name "${JOB_NAME}" -pr "${MINIKUBE_LOCATION}" -repo github.com/kubernetes/minikube/ -details "${COMMIT}") || true
touch "${SUMMARY_OUT}"
gopogh_status=$(gopogh -in "${JSON_OUT}" -out_html "${HTML_OUT}" -out_summary "${SUMMARY_OUT}" -name "${JOB_NAME}" -pr "${MINIKUBE_LOCATION}" -repo github.com/kubernetes/minikube/ -details "${COMMIT}") || true
fail_num=$(echo $gopogh_status | jq '.NumberOfFail')
test_num=$(echo $gopogh_status | jq '.NumberOfTests')
pessimistic_status="${fail_num} / ${test_num} failures"
@ -385,6 +387,9 @@ echo ">> uploading ${JSON_OUT}"
gsutil -qm cp "${JSON_OUT}" "gs://${JOB_GCS_BUCKET}.json" || true
echo ">> uploading ${HTML_OUT}"
gsutil -qm cp "${HTML_OUT}" "gs://${JOB_GCS_BUCKET}.html" || true
echo ">> uploading ${SUMMARY_OUT}"
gsutil -qm cp "${SUMMARY_OUT}" "gs://${JOB_GCS_BUCKET}_summary.json" || true
public_log_url="https://storage.googleapis.com/${JOB_GCS_BUCKET}.txt"
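For context (illustrative only — the bucket name below is a placeholder, not taken from this diff), one way the summary uploaded above could be inspected once the new -out_summary flag is in place:

# Sketch: fetch an uploaded gopogh summary and read the counters used throughout these scripts.
gsutil cp "gs://EXAMPLE_JOB_GCS_BUCKET_summary.json" ./summary.json
jq '.NumberOfFail, .NumberOfTests' ./summary.json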


@ -0,0 +1,76 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -x
# Make sure docker is installed and configured
./hack/jenkins/installers/check_install_docker.sh
yes|gcloud auth configure-docker
docker login -u ${DOCKERHUB_USER} -p ${DOCKERHUB_PASS}
docker login https://docker.pkg.github.com -u minikube-bot -p ${access_token}
# Setup variables
now=$(date +%s)
KV=$(egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f 2 | cut -d "-" -f 1)
GCR_REPO=gcr.io/k8s-minikube/kicbase-builds
DH_REPO=kicbase/build
GH_REPO=kicbase-build
export KIC_VERSION=$KV-$now-$ghprbPullId
GCR_IMG=${GCR_REPO}:${KIC_VERSION}
DH_IMG=${DH_REPO}:${KIC_VERSION}
GH_IMG=docker.pkg.github.com/kubernetes/minikube/${GH_REPO}:${KIC_VERSION}
export KICBASE_IMAGE_REGISTRIES="${GCR_IMG} ${DH_IMG}"
# Let's make sure we have the newest kicbase reference
curl -L https://github.com/kubernetes/minikube/raw/master/pkg/drivers/kic/types.go --output types-head.go
# kicbase tags are of the form VERSION-TIMESTAMP-PR, so this grep finds that TIMESTAMP in the middle
# if it doesn't exist, it will just return VERSION, which is covered in the if statement below
HEAD_KIC_TIMESTAMP=$(egrep "Version =" types-head.go | cut -d \" -f 2 | cut -d "-" -f 2)
CURRENT_KIC_TS=$(egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f 2 | cut -d "-" -f 2)
if [[ $HEAD_KIC_TIMESTAMP != v* ]]; then
diff=$((CURRENT_KIC_TS-HEAD_KIC_TIMESTAMP))
if [[ $CURRENT_KIC_TS == v* ]] || [ $diff -lt 0 ]; then
curl -s -H "Authorization: token ${access_token}" \
-H "Accept: application/vnd.github.v3+json" \
-X POST -d "{\"body\": \"Hi ${ghprbPullAuthorLoginMention}, your kicbase info is out of date. Please rebase.\"}" "https://api.github.com/repos/kubernetes/minikube/issues/$ghprbPullId/comments"
exit 1
fi
fi
rm types-head.go
# Build a new kicbase image
yes|make push-kic-base-image
# Abort with error message if above command failed
ec=$?
if [ $ec -gt 0 ]; then
curl -s -H "Authorization: token ${access_token}" \
-H "Accept: application/vnd.github.v3+json" \
-X POST -d "{\"body\": \"Hi ${ghprbPullAuthorLoginMention}, building a new kicbase image failed, please try again.\"}" "https://api.github.com/repos/kubernetes/minikube/issues/$ghprbPullId/comments"
exit $ec
fi
# Retrieve the sha from the new image
docker pull $GCR_IMG
fullsha=$(docker inspect --format='{{index .RepoDigests 0}}' $KICBASE_IMAGE_REGISTRIES)
sha=$(echo ${fullsha} | cut -d ":" -f 2)
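# e.g. a repo digest like gcr.io/k8s-minikube/kicbase-builds@sha256:876f620c... yields only the hex
# digest after the colon, which is the value stored in baseImageSHA in pkg/drivers/kic/types.go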
# Display the message to the user
message="Hi ${ghprbPullAuthorLoginMention},\\n\\nA new kicbase image is available, please update your PR with the new tag and SHA.\\nIn pkg/drivers/kic/types.go:\\n\\n\\t// Version is the current version of kic\\n\\tVersion = \\\"${KIC_VERSION}\\\"\\n\\t// SHA of the kic base image\\n\\tbaseImageSHA = \\\"${sha}\\\"\\n\\t// The name of the GCR kicbase repository\\n\\tgcrRepo = \\\"${GCR_REPO}\\\"\\n\\t// The name of the Dockerhub kicbase repository\\n\\tdockerhubRepo = \\\"${DH_REPO}\\\"\\nThen run \`make generate-docs\` to update our documentation to reference the new image.\n\nAlternatively, run the following command and commit the changes:\\n\`\`\`\\n sed 's|Version = .*|Version = \\\"${KIC_VERSION}\\\"|;s|baseImageSHA = .*|baseImageSHA = \\\"${sha}\\\"|;s|gcrRepo = .*|gcrRepo = \\\"${GCR_REPO}\\\"|;s|dockerhubRepo = .*|dockerhubRepo = \\\"${DH_REPO}\\\"|' pkg/drivers/kic/types.go > new-types.go; mv new-types.go pkg/drivers/kic/types.go; make generate-docs;\\n\`\`\`"
curl -s -H "Authorization: token ${access_token}" \
-H "Accept: application/vnd.github.v3+json" \
-X POST -d "{\"body\": \"${message}\"}" "https://api.github.com/repos/kubernetes/minikube/issues/$ghprbPullId/comments"

View File

@ -13,6 +13,9 @@
# limitations under the License.
mkdir -p out
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh.exe", "C:\Go\bin\gopogh.exe")
gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/minikube-windows-amd64.exe out/
gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/e2e-windows-amd64.exe out/
gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/testdata .
@ -49,7 +52,7 @@ $elapsed=[math]::Round($elapsed, 2)
Get-Content testout.txt -Encoding ASCII | go tool test2json -t | Out-File -FilePath testout.json -Encoding ASCII
$gopogh_status=gopogh --in testout.json --out testout.html --name "Docker_Windows" -pr $env:MINIKUBE_LOCATION --repo github.com/kubernetes/minikube/ --details $env:COMMIT
$gopogh_status=gopogh --in testout.json --out_html testout.html --out_summary testout_summary.json --name "Docker_Windows" -pr $env:MINIKUBE_LOCATION --repo github.com/kubernetes/minikube/ --details $env:COMMIT
$failures=echo $gopogh_status | jq '.NumberOfFail'
$tests=echo $gopogh_status | jq '.NumberOfTests'
@ -69,6 +72,8 @@ $env:target_url="https://storage.googleapis.com/$gcs_bucket/Docker_Windows.html"
gsutil -qm cp testout.txt gs://$gcs_bucket/Docker_Windowsout.txt
gsutil -qm cp testout.json gs://$gcs_bucket/Docker_Windows.json
gsutil -qm cp testout.html gs://$gcs_bucket/Docker_Windows.html
gsutil -qm cp testout_summary.json gs://$gcs_bucket/Docker_Windows_summary.json
# Update the PR with the new info
$json = "{`"state`": `"$env:status`", `"description`": `"Jenkins: $description`", `"target_url`": `"$env:target_url`", `"context`": `"Docker_Windows`"}"

View File

@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh.exe", "C:\Go\bin\gopogh.exe")
mkdir -p out
gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/minikube-windows-amd64.exe out/
gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/e2e-windows-amd64.exe out/

View File

@ -12,6 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh.exe", "C:\Go\bin\gopogh.exe")
mkdir -p out
gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/minikube-windows-amd64.exe out/
gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/e2e-windows-amd64.exe out/

View File

@ -157,9 +157,6 @@ Alternatively to use this addon you can use a vm-based driver:
To track the update on this work in progress feature please check:
https://github.com/kubernetes/minikube/issues/7332`, out.V{"driver_name": cc.Driver, "os_name": runtime.GOOS, "addon_name": name})
}
} else if driver.BareMetal(cc.Driver) {
exit.Message(reason.Usage, `Due to networking limitations of driver {{.driver_name}}, {{.addon_name}} addon is not supported. Try using a different driver.`,
out.V{"driver_name": cc.Driver, "addon_name": name})
}
}

View File

@ -0,0 +1,34 @@
// +build linux
/*
Copyright 2021 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oci
import (
"syscall"
"golang.org/x/sys/unix"
)
// IsCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode.
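// It does so by checking whether /sys/fs/cgroup is mounted with the cgroup2 filesystem magic (unix.CGROUP2_SUPER_MAGIC).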
func IsCgroup2UnifiedMode() (bool, error) {
var st syscall.Statfs_t
if err := syscall.Statfs("/sys/fs/cgroup", &st); err != nil {
return false, err
}
return st.Type == unix.CGROUP2_SUPER_MAGIC, nil
}

View File

@ -0,0 +1,30 @@
// +build !linux
/*
Copyright 2021 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oci
import (
"runtime"
"github.com/pkg/errors"
)
// IsCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode.
func IsCgroup2UnifiedMode() (bool, error) {
return false, errors.Errorf("Not supported on %s", runtime.GOOS)
}

View File

@ -28,6 +28,7 @@ import (
"github.com/pkg/errors"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/network"
)
// firstSubnetAddr subnet to be used on first kic cluster
@ -70,32 +71,18 @@ func CreateNetwork(ociBin string, networkName string) (net.IP, error) {
if err != nil {
klog.Warningf("failed to get mtu information from the %s's default network %q: %v", ociBin, defaultBridgeName, err)
}
attempts := 0
subnetAddr := firstSubnetAddr
// Rather than iterate through all of the valid subnets, give up at 20 to avoid a lengthy user delay for something that is unlikely to work.
// will be like 192.168.49.0/24 ,...,192.168.239.0/24
for attempts < 20 {
info.gateway, err = tryCreateDockerNetwork(ociBin, subnetAddr, defaultSubnetMask, info.mtu, networkName)
if err == nil {
return info.gateway, nil
}
// don't retry if the error is not that the address is taken
if !(errors.Is(err, ErrNetworkSubnetTaken) || errors.Is(err, ErrNetworkGatewayTaken)) {
klog.Errorf("error while trying to create network %v", err)
return nil, errors.Wrap(err, "un-retryable")
}
attempts++
// Find an open subnet by incrementing the 3rd octet by 10 for each try
// 13 times adding 10 firstSubnetAddr "192.168.49.0/24"
// at most it will add up to 169 which is still less than max allowed 255
// this is large enough to try more and not too small to not try enough
// can be tuned in the next iterations
newSubnet := net.ParseIP(subnetAddr).To4()
newSubnet[2] += byte(9 + attempts)
subnetAddr = newSubnet.String()
subnet, err := network.FreeSubnet(firstSubnetAddr, 10, 20)
if err != nil {
klog.Errorf("error while trying to create network: %v", err)
return nil, errors.Wrap(err, "un-retryable")
}
return info.gateway, fmt.Errorf("failed to create network after 20 attempts")
info.gateway, err = tryCreateDockerNetwork(ociBin, subnet.IP, defaultSubnetMask, info.mtu, networkName)
if err != nil {
return info.gateway, fmt.Errorf("failed to create network after 20 attempts")
}
return info.gateway, nil
}
func tryCreateDockerNetwork(ociBin string, subnetAddr string, subnetMask int, mtu int, name string) (net.IP, error) {

View File

@ -100,13 +100,45 @@ func PrepareContainerNode(p CreateParams) error {
return errors.Wrapf(err, "creating volume for %s container", p.Name)
}
klog.Infof("Successfully created a %s volume %s", p.OCIBinary, p.Name)
if err := prepareVolume(p.OCIBinary, p.Image, p.Name); err != nil {
if err := prepareVolumeSideCar(p.OCIBinary, p.Image, p.Name); err != nil {
return errors.Wrapf(err, "preparing volume for %s container", p.Name)
}
klog.Infof("Successfully prepared a %s volume %s", p.OCIBinary, p.Name)
return nil
}
func hasMemoryCgroup() bool {
memcg := true
if runtime.GOOS == "linux" {
var memory string
if cgroup2, err := IsCgroup2UnifiedMode(); err == nil && cgroup2 {
memory = "/sys/fs/cgroup/memory/memsw.limit_in_bytes"
}
if _, err := os.Stat(memory); os.IsNotExist(err) {
klog.Warning("Your kernel does not support memory limit capabilities or the cgroup is not mounted.")
out.WarningT("Cgroup v2 does not allow setting memory, if you want to set memory, please modify your Grub as instructed in https://docs.docker.com/engine/install/linux-postinstall/#your-kernel-does-not-support-cgroup-swap-limit-capabilities")
memcg = false
}
}
return memcg
}
func hasMemorySwapCgroup() bool {
memcgSwap := true
if runtime.GOOS == "linux" {
var memoryswap string
if cgroup2, err := IsCgroup2UnifiedMode(); err == nil && cgroup2 {
memoryswap = "/sys/fs/cgroup/memory/memory.swap.max"
}
if _, err := os.Stat(memoryswap); os.IsNotExist(err) {
// requires CONFIG_MEMCG_SWAP_ENABLED or cgroup_enable=memory in grub
klog.Warning("Your kernel does not support swap limit capabilities or the cgroup is not mounted.")
memcgSwap = false
}
}
return memcgSwap
}
// CreateContainerNode creates a new container node
func CreateContainerNode(p CreateParams) error {
// on windows os, if docker desktop is using Windows Containers. Exit early with error
@ -152,14 +184,8 @@ func CreateContainerNode(p CreateParams) error {
runArgs = append(runArgs, "--ip", p.IP)
}
memcgSwap := true
if runtime.GOOS == "linux" {
if _, err := os.Stat("/sys/fs/cgroup/memory/memsw.limit_in_bytes"); os.IsNotExist(err) {
// requires CONFIG_MEMCG_SWAP_ENABLED or cgroup_enable=memory in grub
klog.Warning("Your kernel does not support swap limit capabilities or the cgroup is not mounted.")
memcgSwap = false
}
}
memcgSwap := hasMemorySwapCgroup()
memcg := hasMemoryCgroup()
// https://www.freedesktop.org/wiki/Software/systemd/ContainerInterface/
var virtualization string
@ -168,11 +194,13 @@ func CreateContainerNode(p CreateParams) error {
runArgs = append(runArgs, "--volume", fmt.Sprintf("%s:/var:exec", p.Name))
if memcgSwap {
runArgs = append(runArgs, fmt.Sprintf("--memory=%s", p.Memory))
// Disable swap by setting the value to match
runArgs = append(runArgs, fmt.Sprintf("--memory-swap=%s", p.Memory))
}
if memcg {
runArgs = append(runArgs, fmt.Sprintf("--memory=%s", p.Memory))
}
virtualization = "podman" // VIRTUALIZATION_PODMAN
}
if p.OCIBinary == Docker {
@ -180,9 +208,13 @@ func CreateContainerNode(p CreateParams) error {
// ignore apparmore github actions docker: https://github.com/kubernetes/minikube/issues/7624
runArgs = append(runArgs, "--security-opt", "apparmor=unconfined")
runArgs = append(runArgs, fmt.Sprintf("--memory=%s", p.Memory))
// Disable swap by setting the value to match
runArgs = append(runArgs, fmt.Sprintf("--memory-swap=%s", p.Memory))
if memcg {
runArgs = append(runArgs, fmt.Sprintf("--memory=%s", p.Memory))
}
if memcgSwap {
// Disable swap by setting the value to match
runArgs = append(runArgs, fmt.Sprintf("--memory-swap=%s", p.Memory))
}
virtualization = "docker" // VIRTUALIZATION_DOCKER
}

View File

@ -151,9 +151,9 @@ func createVolume(ociBin string, profile string, nodeName string) error {
return nil
}
// prepareVolume will copy the initial content of the mount point by starting a container to check the expected content
func prepareVolume(ociBin string, imageName string, nodeName string) error {
cmdArgs := []string{"run", "--rm", "--entrypoint", "/usr/bin/test"}
// prepareVolumeSideCar will copy the initial content of the mount point by starting a temporary container to check the expected content
func prepareVolumeSideCar(ociBin string, imageName string, nodeName string) error {
cmdArgs := []string{"run", "--rm", "--name", fmt.Sprintf("%s-preload-sidecar", nodeName), "--label", fmt.Sprintf("%s=%s", CreatedByLabelKey, "true"), "--label", fmt.Sprintf("%s=%s", ProfileLabelKey, nodeName), "--entrypoint", "/usr/bin/test"}
cmdArgs = append(cmdArgs, "-v", fmt.Sprintf("%s:/var", nodeName), imageName, "-d", "/var/lib")
cmd := exec.Command(ociBin, cmdArgs...)
if _, err := runCmd(cmd); err != nil {

View File

@ -24,25 +24,31 @@ import (
const (
// Version is the current version of kic
Version = "v0.0.17"
Version = "v0.0.17-1613704090-10418"
// SHA of the kic base image
baseImageSHA = "1cd2e039ec9d418e6380b2fa0280503a72e5b282adea674ee67882f59f4f546e"
baseImageSHA = "876f620cdc40b4616e4e11db64524c520e252ede006357eaa963488ae852b6ed"
// The name of the GCR kicbase repository
gcrRepo = "gcr.io/k8s-minikube/kicbase-builds"
// The name of the Dockerhub kicbase repository
dockerhubRepo = "kicbase/build"
// The name of the Github Packages repository
ghRepo = "kicbase"
)
var (
// BaseImage is the base image used to spin up kic containers. It uses the same base image as kind.
BaseImage = fmt.Sprintf("gcr.io/k8s-minikube/kicbase:%s@sha256:%s", Version, baseImageSHA)
BaseImage = fmt.Sprintf("%s:%s@sha256:%s", gcrRepo, Version, baseImageSHA)
// FallbackImages are backup base images in case gcr isn't available
FallbackImages = []string{
// the fallback of BaseImage in case gcr.io is not available, stored in Docker Hub
// the same image is pushed to https://github.com/kicbase/stable
fmt.Sprintf("kicbase/stable:%s@sha256:%s", Version, baseImageSHA),
fmt.Sprintf("%s:%s@sha256:%s", dockerhubRepo, Version, baseImageSHA),
// the fallback of BaseImage in case gcr.io is not available. stored in github packages https://github.com/kubernetes/minikube/packages/206071
// github packages docker does _NOT_ support pulling by sha as mentioned in the docs:
// https://help.github.com/en/packages/using-github-packages-with-your-projects-ecosystem/configuring-docker-for-use-with-github-packages
fmt.Sprintf("docker.pkg.github.com/kubernetes/minikube/kicbase:%s", Version),
fmt.Sprintf("docker.pkg.github.com/kubernetes/minikube/%s:%s", ghRepo, Version),
}
)

View File

@ -31,6 +31,7 @@ import (
"github.com/docker/machine/libmachine/log"
libvirt "github.com/libvirt/libvirt-go"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/network"
"k8s.io/minikube/pkg/util/retry"
)
@ -38,16 +39,27 @@ import (
// https://play.golang.org/p/m8TNTtygK0
const networkTmpl = `
<network>
<name>{{.PrivateNetwork}}</name>
<name>{{.Name}}</name>
<dns enable='no'/>
<ip address='192.168.39.1' netmask='255.255.255.0'>
{{with .Parameters}}
<ip address='{{.Gateway}}' netmask='{{.Netmask}}'>
<dhcp>
<range start='192.168.39.2' end='192.168.39.254'/>
<range start='{{.ClientMin}}' end='{{.ClientMax}}'/>
</dhcp>
</ip>
{{end}}
</network>
`
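// For example, rendered for the default 192.168.39.0/24 subnet (assuming no conflicting local interface),
// the template yields <ip address='192.168.39.1' netmask='255.255.255.0'> with a DHCP range of
// 192.168.39.2-192.168.39.254.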
type kvmNetwork struct {
Name string
network.Parameters
}
// firstSubnetAddr is the starting subnet to try for a new KVM cluster,
// avoiding possible conflicts with other local networks by incrementing it up to 20 times in steps of 10.
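// For example, with a step of 10 the candidate subnets are 192.168.39.0/24, 192.168.49.0/24, ...,
// up to 192.168.229.0/24 on the 20th attempt.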
const firstSubnetAddr = "192.168.39.0"
// setupNetwork ensures that the network with `name` is started (active)
// and has the autostart feature set.
func setupNetwork(conn *libvirt.Connect, name string) error {
@ -145,10 +157,20 @@ func (d *Driver) createNetwork() error {
// Only create the private network if it does not already exist
netp, err := conn.LookupNetworkByName(d.PrivateNetwork)
if err != nil {
subnet, err := network.FreeSubnet(firstSubnetAddr, 10, 20)
if err != nil {
log.Debugf("error while trying to create network: %v", err)
return errors.Wrap(err, "un-retryable")
}
tryNet := kvmNetwork{
Name: d.PrivateNetwork,
Parameters: *subnet,
}
// create the XML for the private network from our networkTmpl
tmpl := template.Must(template.New("network").Parse(networkTmpl))
var networkXML bytes.Buffer
if err := tmpl.Execute(&networkXML, d); err != nil {
if err := tmpl.Execute(&networkXML, tryNet); err != nil {
return errors.Wrap(err, "executing network template")
}
@ -173,6 +195,7 @@ func (d *Driver) createNetwork() error {
if err := retry.Local(create, 10*time.Second); err != nil {
return errors.Wrapf(err, "creating network %s", d.PrivateNetwork)
}
log.Debugf("Network %s created", d.PrivateNetwork)
}
defer func() {
if netp != nil {
@ -201,7 +224,7 @@ func (d *Driver) deleteNetwork() error {
log.Warnf("Network %s does not exist. Skipping deletion", d.PrivateNetwork)
return nil
}
return errors.Wrapf(err, "failed looking for network %s", d.PrivateNetwork)
return errors.Wrapf(err, "failed looking up network %s", d.PrivateNetwork)
}
defer func() { _ = network.Free() }()
log.Debugf("Network %s exists", d.PrivateNetwork)
@ -213,58 +236,25 @@ func (d *Driver) deleteNetwork() error {
// when we reach this point, it means it is safe to delete the network
// cannot destroy an inactive network - try to activate it first
log.Debugf("Trying to reactivate network %s first (if needed)...", d.PrivateNetwork)
activate := func() error {
log.Debugf("Trying to delete network %s...", d.PrivateNetwork)
delete := func() error {
active, err := network.IsActive()
if err == nil && active {
return nil
}
if err != nil {
return err
}
// inactive, try to activate
if err := network.Create(); err != nil {
return err
if active {
log.Debugf("Destroying active network %s", d.PrivateNetwork)
if err := network.Destroy(); err != nil {
return err
}
}
return errors.Errorf("needs confirmation") // confirm in the next cycle
log.Debugf("Undefining inactive network %s", d.PrivateNetwork)
return network.Undefine()
}
if err := retry.Local(activate, 10*time.Second); err != nil {
log.Debugf("Reactivating network %s failed, will continue anyway...", d.PrivateNetwork)
}
log.Debugf("Trying to destroy network %s...", d.PrivateNetwork)
destroy := func() error {
if err := network.Destroy(); err != nil {
return err
}
active, err := network.IsActive()
if err == nil && !active {
return nil
}
return errors.Errorf("retrying %v", err)
}
if err := retry.Local(destroy, 10*time.Second); err != nil {
return errors.Wrap(err, "destroying network")
}
log.Debugf("Trying to undefine network %s...", d.PrivateNetwork)
undefine := func() error {
if err := network.Undefine(); err != nil {
return err
}
netp, err := conn.LookupNetworkByName(d.PrivateNetwork)
if netp != nil {
_ = netp.Free()
}
if lvErr(err).Code == libvirt.ERR_NO_NETWORK {
return nil
}
return errors.Errorf("retrying %v", err)
}
if err := retry.Local(undefine, 10*time.Second); err != nil {
return errors.Wrap(err, "undefining network")
if err := retry.Local(delete, 10*time.Second); err != nil {
return errors.Wrap(err, "deleting network")
}
log.Debugf("Network %s deleted", d.PrivateNetwork)
return nil
}

View File

@ -25,7 +25,6 @@ import (
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/style"
"k8s.io/minikube/pkg/minikube/vmpath"
"k8s.io/minikube/pkg/version"
)
@ -689,19 +688,18 @@ func GenerateTemplateData(addon *Addon, cfg config.KubernetesConfig) interface{}
opts.Registries[name] = "" // Avoid nil access when rendering
}
// Send messages to stderr due to some tests rely on stdout
if override, ok := opts.CustomRegistries[name]; ok {
out.ErrT(style.Option, "Using image {{.registry}}{{.image}}", out.V{
out.Infof("Using image {{.registry}}{{.image}}", out.V{
"registry": override,
"image": image,
})
} else if opts.ImageRepository != "" {
out.ErrT(style.Option, "Using image {{.registry}}{{.image}} (global image repository)", out.V{
out.Infof("Using image {{.registry}}{{.image}} (global image repository)", out.V{
"registry": opts.ImageRepository,
"image": image,
})
} else {
out.ErrT(style.Option, "Using image {{.registry}}{{.image}}", out.V{
out.Infof("Using image {{.registry}}{{.image}}", out.V{
"registry": opts.Registries[name],
"image": image,
})

View File

@ -25,6 +25,7 @@ import (
"github.com/spf13/viper"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/version"
)
// userName pulls the user flag, if empty gets the os username.
@ -54,8 +55,8 @@ func Log(startTime time.Time) {
if !shouldLog() {
return
}
e := newEntry(os.Args[1], args(), userName(), startTime, time.Now())
if err := appendToLog(e); err != nil {
r := newRow(os.Args[1], args(), userName(), version.GetVersion(), startTime, time.Now())
if err := appendToLog(r); err != nil {
klog.Error(err)
}
}

View File

@ -1,49 +0,0 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package audit
import (
"time"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
)
// entry represents the execution of a command.
type entry struct {
data map[string]string
}
// Type returns the cloud events compatible type of this struct.
func (e *entry) Type() string {
return "io.k8s.sigs.minikube.audit"
}
// newEntry returns a new audit type.
func newEntry(command string, args string, user string, startTime time.Time, endTime time.Time) *entry {
return &entry{
map[string]string{
"args": args,
"command": command,
"endTime": endTime.Format(constants.TimeFormat),
"profile": viper.GetString(config.ProfileName),
"startTime": startTime.Format(constants.TimeFormat),
"user": user,
},
}
}

View File

@ -30,7 +30,7 @@ var currentLogFile *os.File
// setLogFile sets the logPath and creates the log file if it doesn't exist.
func setLogFile() error {
lp := localpath.AuditLog()
f, err := os.OpenFile(lp, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
f, err := os.OpenFile(lp, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0644)
if err != nil {
return fmt.Errorf("unable to open %s: %v", lp, err)
}
@ -38,15 +38,15 @@ func setLogFile() error {
return nil
}
// appendToLog appends the audit entry to the log file.
func appendToLog(entry *entry) error {
// appendToLog appends the row to the log file.
func appendToLog(row *row) error {
if currentLogFile == nil {
if err := setLogFile(); err != nil {
return err
}
}
e := register.CloudEvent(entry, entry.data)
bs, err := e.MarshalJSON()
ce := register.CloudEvent(row, row.toMap())
bs, err := ce.MarshalJSON()
if err != nil {
return fmt.Errorf("error marshalling event: %v", err)
}

View File

@ -42,8 +42,8 @@ func TestLogFile(t *testing.T) {
defer func() { currentLogFile = &oldLogFile }()
currentLogFile = f
e := newEntry("start", "-v", "user1", time.Now(), time.Now())
if err := appendToLog(e); err != nil {
r := newRow("start", "-v", "user1", "v0.17.1", time.Now(), time.Now())
if err := appendToLog(r); err != nil {
t.Fatalf("Error appendingToLog: %v", err)
}

View File

@ -0,0 +1,66 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package audit
import (
"bufio"
"fmt"
)
// RawReport contains the information required to generate formatted reports.
type RawReport struct {
headers []string
rows []row
}
// Report is created using the last n lines from the log file.
func Report(lastNLines int) (*RawReport, error) {
if lastNLines <= 0 {
return nil, fmt.Errorf("last n lines must be 1 or greater")
}
if currentLogFile == nil {
if err := setLogFile(); err != nil {
return nil, fmt.Errorf("failed to set the log file: %v", err)
}
}
var logs []string
s := bufio.NewScanner(currentLogFile)
for s.Scan() {
// pop off the earliest line if already at desired log length
if len(logs) == lastNLines {
logs = logs[1:]
}
logs = append(logs, s.Text())
}
if err := s.Err(); err != nil {
return nil, fmt.Errorf("failed to read from audit file: %v", err)
}
rows, err := logsToRows(logs)
if err != nil {
return nil, fmt.Errorf("failed to convert logs to rows: %v", err)
}
r := &RawReport{
[]string{"Command", "Args", "Profile", "User", "Version", "Start Time", "End Time"},
rows,
}
return r, nil
}
// ASCIITable creates a formatted table using the headers and rows from the report.
func (rr *RawReport) ASCIITable() string {
return rowsToASCIITable(rr.rows, rr.headers)
}

View File

@ -0,0 +1,56 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package audit
import (
"io/ioutil"
"os"
"testing"
)
func TestReport(t *testing.T) {
f, err := ioutil.TempFile("", "audit.json")
if err != nil {
t.Fatalf("failed creating temporary file: %v", err)
}
defer os.Remove(f.Name())
s := `{"data":{"args":"-p mini1","command":"start","endTime":"Wed, 03 Feb 2021 15:33:05 MST","profile":"mini1","startTime":"Wed, 03 Feb 2021 15:30:33 MST","user":"user1"},"datacontenttype":"application/json","id":"9b7593cb-fbec-49e5-a3ce-bdc2d0bfb208","source":"https://minikube.sigs.k8s.io/","specversion":"1.0","type":"io.k8s.sigs.minikube.audit"}
{"data":{"args":"-p mini1","command":"start","endTime":"Wed, 03 Feb 2021 15:33:05 MST","profile":"mini1","startTime":"Wed, 03 Feb 2021 15:30:33 MST","user":"user1"},"datacontenttype":"application/json","id":"9b7593cb-fbec-49e5-a3ce-bdc2d0bfb208","source":"https://minikube.sigs.k8s.io/","specversion":"1.0","type":"io.k8s.sigs.minikube.audit"}
{"data":{"args":"--user user2","command":"logs","endTime":"Tue, 02 Feb 2021 16:46:20 MST","profile":"minikube","startTime":"Tue, 02 Feb 2021 16:46:00 MST","user":"user2"},"datacontenttype":"application/json","id":"fec03227-2484-48b6-880a-88fd010b5efd","source":"https://minikube.sigs.k8s.io/","specversion":"1.0","type":"io.k8s.sigs.minikube.audit"}`
if _, err := f.WriteString(s); err != nil {
t.Fatalf("failed writing to file: %v", err)
}
if _, err := f.Seek(0, 0); err != nil {
t.Fatalf("failed seeking to start of file: %v", err)
}
oldLogFile := *currentLogFile
defer func() { currentLogFile = &oldLogFile }()
currentLogFile = f
wantedLines := 2
r, err := Report(wantedLines)
if err != nil {
t.Fatalf("failed to create report: %v", err)
}
if len(r.rows) != wantedLines {
t.Errorf("report has %d lines of logs, want %d", len(r.rows), wantedLines)
}
}

126
pkg/minikube/audit/row.go Normal file
View File

@ -0,0 +1,126 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package audit
import (
"bytes"
"encoding/json"
"fmt"
"time"
"github.com/olekukonko/tablewriter"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
)
// row is the log of a single command.
type row struct {
args string
command string
endTime string
profile string
startTime string
user string
version string
Data map[string]string `json:"data"`
}
// Type returns the cloud events compatible type of this struct.
func (e *row) Type() string {
return "io.k8s.sigs.minikube.audit"
}
// assignFields converts the map values to their proper fields,
// to be used when converting from JSON Cloud Event format.
func (e *row) assignFields() {
e.args = e.Data["args"]
e.command = e.Data["command"]
e.endTime = e.Data["endTime"]
e.profile = e.Data["profile"]
e.startTime = e.Data["startTime"]
e.user = e.Data["user"]
e.version = e.Data["version"]
}
// toMap combines fields into a string map,
// to be used when converting to JSON Cloud Event format.
func (e *row) toMap() map[string]string {
return map[string]string{
"args": e.args,
"command": e.command,
"endTime": e.endTime,
"profile": e.profile,
"startTime": e.startTime,
"user": e.user,
"version": e.version,
}
}
// newRow creates a new audit row.
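// For example, newRow("start", "-v", "user1", "v0.17.1", startTime, endTime) uses the active profile
// from viper; passing an explicit profile as the optional last argument overrides it.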
func newRow(command string, args string, user string, version string, startTime time.Time, endTime time.Time, profile ...string) *row {
p := viper.GetString(config.ProfileName)
if len(profile) > 0 {
p = profile[0]
}
return &row{
args: args,
command: command,
endTime: endTime.Format(constants.TimeFormat),
profile: p,
startTime: startTime.Format(constants.TimeFormat),
user: user,
version: version,
}
}
// toFields converts a row to an array of fields,
// to be used when converting to a table.
func (e *row) toFields() []string {
return []string{e.command, e.args, e.profile, e.user, e.version, e.startTime, e.endTime}
}
// logsToRows converts audit logs into arrays of rows.
func logsToRows(logs []string) ([]row, error) {
rows := []row{}
for _, l := range logs {
r := row{}
if err := json.Unmarshal([]byte(l), &r); err != nil {
return nil, fmt.Errorf("failed to unmarshal %q: %v", l, err)
}
r.assignFields()
rows = append(rows, r)
}
return rows, nil
}
// rowsToASCIITable converts rows into a formatted ASCII table.
func rowsToASCIITable(rows []row, headers []string) string {
c := [][]string{}
for _, r := range rows {
c = append(c, r.toFields())
}
b := new(bytes.Buffer)
t := tablewriter.NewWriter(b)
t.SetHeader(headers)
t.SetAutoFormatHeaders(false)
t.SetBorders(tablewriter.Border{Left: true, Top: true, Right: true, Bottom: true})
t.SetCenterSeparator("|")
t.AppendBulk(c)
t.Render()
return b.String()
}

View File

@ -0,0 +1,139 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package audit
import (
"encoding/json"
"fmt"
"strings"
"testing"
"time"
"k8s.io/minikube/pkg/minikube/constants"
)
func TestRow(t *testing.T) {
c := "start"
a := "--alsologtostderr"
p := "profile1"
u := "user1"
v := "v0.17.1"
st := time.Now()
stFormatted := st.Format(constants.TimeFormat)
et := time.Now()
etFormatted := et.Format(constants.TimeFormat)
r := newRow(c, a, u, v, st, et, p)
t.Run("NewRow", func(t *testing.T) {
tests := []struct {
key string
got string
want string
}{
{"command", r.command, c},
{"args", r.args, a},
{"profile", r.profile, p},
{"user", r.user, u},
{"version", r.version, v},
{"startTime", r.startTime, stFormatted},
{"endTime", r.endTime, etFormatted},
}
for _, tt := range tests {
if tt.got != tt.want {
t.Errorf("row.%s = %s; want %s", tt.key, tt.got, tt.want)
}
}
})
t.Run("Type", func(t *testing.T) {
got := r.Type()
want := "io.k8s.sigs.minikube.audit"
if got != want {
t.Errorf("Type() = %s; want %s", got, want)
}
})
t.Run("toMap", func(t *testing.T) {
m := r.toMap()
tests := []struct {
key string
want string
}{
{"command", c},
{"args", a},
{"profile", p},
{"user", u},
{"version", v},
{"startTime", stFormatted},
{"endTime", etFormatted},
}
for _, tt := range tests {
got := m[tt.key]
if got != tt.want {
t.Errorf("map[%s] = %s; want %s", tt.key, got, tt.want)
}
}
})
t.Run("toFields", func(t *testing.T) {
got := r.toFields()
gotString := strings.Join(got, ",")
want := []string{c, a, p, u, v, stFormatted, etFormatted}
wantString := strings.Join(want, ",")
if gotString != wantString {
t.Errorf("toFields() = %s; want %s", gotString, wantString)
}
})
t.Run("assignFields", func(t *testing.T) {
l := fmt.Sprintf(`{"data":{"args":"%s","command":"%s","endTime":"%s","profile":"%s","startTime":"%s","user":"%s","version":"v0.17.1"},"datacontenttype":"application/json","id":"bc6ec9d4-0d08-4b57-ac3b-db8d67774768","source":"https://minikube.sigs.k8s.io/","specversion":"1.0","type":"io.k8s.sigs.minikube.audit"}`, a, c, etFormatted, p, stFormatted, u)
r := &row{}
if err := json.Unmarshal([]byte(l), r); err != nil {
t.Fatalf("failed to unmarshal log: %v", err)
}
r.assignFields()
tests := []struct {
key string
got string
want string
}{
{"command", r.command, c},
{"args", r.args, a},
{"profile", r.profile, p},
{"user", r.user, u},
{"version", r.version, v},
{"startTime", r.startTime, stFormatted},
{"endTime", r.endTime, etFormatted},
}
for _, tt := range tests {
if tt.got != tt.want {
t.Errorf("row.%s = %s; want %s", tt.key, tt.got, tt.want)
}
}
})
}

View File

@ -36,7 +36,7 @@ func WaitForDefaultSA(cs *kubernetes.Clientset, timeout time.Duration) error {
// equivalent to manual check of 'kubectl --context profile get serviceaccount default'
sas, err := cs.CoreV1().ServiceAccounts("default").List(meta.ListOptions{})
if err != nil {
klog.Infof("temproary error waiting for default SA: %v", err)
klog.Infof("temporary error waiting for default SA: %v", err)
return false, nil
}
for _, sa := range sas.Items {

View File

@ -37,6 +37,8 @@ const (
NodeReadyKey = "node_ready"
// KubeletKey is the name used in the flags for waiting for the kubelet status to be ready
KubeletKey = "kubelet"
// ExtraKey is the name used for extra waiting for pods in CorePodsList to be Ready
ExtraKey = "extra"
)
// vars related to the --wait flag
@ -44,9 +46,9 @@ var (
// DefaultComponents is a map of the default components to wait for
DefaultComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true}
// NoComponents is a map of components not to wait for, used when 'none' or 'false' is specified
NoComponents = map[string]bool{APIServerWaitKey: false, SystemPodsWaitKey: false, DefaultSAWaitKey: false, AppsRunningKey: false, NodeReadyKey: false, KubeletKey: false}
NoComponents = map[string]bool{APIServerWaitKey: false, SystemPodsWaitKey: false, DefaultSAWaitKey: false, AppsRunningKey: false, NodeReadyKey: false, KubeletKey: false, ExtraKey: false}
// AllComponents is map for waiting for all components.
AllComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true, DefaultSAWaitKey: true, AppsRunningKey: true, NodeReadyKey: true, KubeletKey: true}
AllComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true, DefaultSAWaitKey: true, AppsRunningKey: true, NodeReadyKey: true, KubeletKey: true, ExtraKey: true}
// DefaultWaitList is a list of all default components to wait for. Only names to be used for start flags.
DefaultWaitList = []string{APIServerWaitKey, SystemPodsWaitKey}
// AllComponentsList is a list of all valid component keys to wait for. Only names to be used for start flags.
@ -60,6 +62,15 @@ var (
"kube-proxy",
"kube-scheduler",
}
// CorePodsList is a list of essential Kubernetes pods to additionally wait on to become Ready
CorePodsList = []string{
"kube-dns", // coredns
"etcd",
"kube-apiserver",
"kube-controller-manager",
"kube-proxy",
"kube-scheduler",
}
)
// ShouldWait will return true if the config says need to wait

View File

@ -0,0 +1,133 @@
/*
Copyright 2021 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package kverify verifies a running Kubernetes cluster is healthy
package kverify
import (
"fmt"
"strings"
"time"
"github.com/pkg/errors"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
)
// WaitExtra calls waitForPodReadyByLabel for each label in the list and returns any errors that occurred.
func WaitExtra(cs *kubernetes.Clientset, labels []string, timeout time.Duration) error {
klog.Infof("extra waiting for kube-system core pods %s to be Ready ...", labels)
start := time.Now()
defer func() {
klog.Infof("duration metric: took %s for extra waiting for kube-system core pods to be Ready ...", time.Since(start))
}()
var errs []string
for _, label := range labels {
if err := waitForPodReadyByLabel(cs, label, "kube-system", timeout); err != nil {
errs = append(errs, fmt.Sprintf("%q: %q", label, err.Error()))
}
}
if errs != nil {
return fmt.Errorf(strings.Join(errs, ", "))
}
return nil
}
// waitForPodReadyByLabel waits for pod with label ([key:]val) in a namespace to be in Ready condition.
// If namespace is not provided, it defaults to "kube-system".
// If label key is not provided, it will try with "component" and "k8s-app".
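// For example, the label "etcd" matches pods labelled component=etcd or k8s-app=etcd,
// while "tier:control-plane" matches pods labelled tier=control-plane.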
func waitForPodReadyByLabel(cs *kubernetes.Clientset, label, namespace string, timeout time.Duration) error {
klog.Infof("waiting %v for pod with %q label in %q namespace to be Ready ...", timeout, label, namespace)
start := time.Now()
defer func() {
klog.Infof("duration metric: took %v to run WaitForPodReadyByLabel for pod with %q label in %q namespace ...", time.Since(start), label, namespace)
}()
if namespace == "" {
namespace = "kube-system"
}
lkey := ""
lval := ""
l := strings.Split(label, ":")
switch len(l) {
case 1: // treat as no label key provided, just val
lval = strings.TrimSpace(l[0])
case 2:
lkey = strings.TrimSpace(l[0])
lval = strings.TrimSpace(l[1])
default:
return fmt.Errorf("pod label %q is malformed", label)
}
lap := time.Now()
checkReady := func() (bool, error) {
if time.Since(start) > timeout {
return false, fmt.Errorf("wait for pod with %q label in %q namespace to be Ready timed out", label, namespace)
}
pods, err := cs.CoreV1().Pods(namespace).List(meta.ListOptions{})
if err != nil {
klog.Infof("error listing pods in %q namespace, will retry: %v", namespace, err)
return false, nil
}
for _, pod := range pods.Items {
for k, v := range pod.ObjectMeta.Labels {
if ((lkey == "" && (k == "component" || k == "k8s-app")) || lkey == k) && v == lval {
ready, reason := IsPodReady(&pod)
if ready {
klog.Info(reason)
return true, nil
}
// reduce log spam
if time.Since(lap) > (1 * time.Second) {
klog.Info(reason)
lap = time.Now()
}
return false, nil
}
}
}
klog.Infof("pod with %q label in %q namespace was not found, will retry", label, namespace)
return false, nil
}
if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, checkReady); err != nil {
return errors.Wrapf(err, "wait pod Ready")
}
return nil
}
// IsPodReady returns whether the pod is Ready, along with a verbose reason.
func IsPodReady(pod *core.Pod) (ready bool, reason string) {
if pod.Status.Phase != core.PodRunning {
return false, fmt.Sprintf("pod %q in %q namespace is not Running: %+v", pod.Name, pod.Namespace, pod.Status)
}
for _, c := range pod.Status.Conditions {
if c.Type == core.PodReady {
if c.Status != core.ConditionTrue {
return false, fmt.Sprintf("pod %q in %q namespace is not Ready: %+v", pod.Name, pod.Namespace, c)
}
return true, fmt.Sprintf("pod %q in %q namespace is Ready: %+v", pod.Name, pod.Namespace, c)
}
}
return false, fmt.Sprintf("pod %q in %q namespace does not have %q status: %+v", pod.Name, pod.Namespace, core.PodReady, pod.Status)
}

View File

@ -36,6 +36,7 @@ import (
"github.com/docker/machine/libmachine"
"github.com/docker/machine/libmachine/state"
"github.com/pkg/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
@ -470,6 +471,12 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
return nil
}
if cfg.VerifyComponents[kverify.ExtraKey] {
if err := kverify.WaitExtra(client, kverify.CorePodsList, timeout); err != nil {
return errors.Wrap(err, "extra waiting")
}
}
cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c})
if err != nil {
return errors.Wrapf(err, "create runtime-manager %s", cfg.KubernetesConfig.ContainerRuntime)
@ -504,11 +511,11 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
}
}
}
if cfg.VerifyComponents[kverify.KubeletKey] {
if err := kverify.WaitForService(k.c, "kubelet", timeout); err != nil {
return errors.Wrap(err, "waiting for kubelet")
}
}
if cfg.VerifyComponents[kverify.NodeReadyKey] {
@ -658,6 +665,35 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error {
}
}
if cfg.VerifyComponents[kverify.ExtraKey] {
// after kubelet is restarted (with 'kubeadm init phase kubelet-start' above),
// it briefly reports itself, and all kube-system pods, as Ready,
// then (after ~10sec) it realises it has changes to apply, which also implies pod restarts;
// by that time we would have already exited, so wait here until kubelet begins restarting the pods
klog.Info("waiting for restarted kubelet to initialise ...")
start := time.Now()
wait := func() error {
pods, err := client.CoreV1().Pods("kube-system").List(meta.ListOptions{})
if err != nil {
return err
}
for _, pod := range pods.Items {
if pod.Labels["tier"] == "control-plane" {
if ready, _ := kverify.IsPodReady(&pod); !ready {
return nil
}
}
}
return fmt.Errorf("kubelet not initialised")
}
_ = retry.Expo(wait, 250*time.Millisecond, 1*time.Minute)
klog.Infof("kubelet initialised")
klog.Infof("duration metric: took %s waiting for restarted kubelet to initialise ...", time.Since(start))
if err := kverify.WaitExtra(client, kverify.CorePodsList, kconst.DefaultControlPlaneTimeout); err != nil {
return errors.Wrap(err, "extra")
}
}
cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c})
if err != nil {
return errors.Wrap(err, "runtime")
@ -698,6 +734,7 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error {
if err := bsutil.AdjustResourceLimits(k.c); err != nil {
klog.Warningf("unable to adjust resource limits: %v", err)
}
return nil
}

View File

@ -47,7 +47,11 @@ func HostIP(host *host.Host, clusterName string) (net.IP, error) {
}
return net.ParseIP(ip), nil
case driver.KVM2:
return net.ParseIP("192.168.39.1"), nil
ip, err := host.Driver.GetIP()
if err != nil {
return []byte{}, errors.Wrap(err, "Error getting VM/Host IP address")
}
return net.ParseIP(ip), nil
case driver.HyperV:
v := reflect.ValueOf(host.Driver).Elem()
var hypervVirtualSwitch string

View File

@ -151,9 +151,12 @@ func New(c Config) (Manager, error) {
switch c.Type {
case "", "docker":
return &Docker{
Socket: c.Socket,
Runner: c.Runner,
Init: sm,
Socket: c.Socket,
Runner: c.Runner,
ImageRepository: c.ImageRepository,
KubernetesVersion: c.KubernetesVersion,
Init: sm,
UseCRI: (c.Socket != ""), // !dockershim
}, nil
case "crio", "cri-o":
return &CRIO{

View File

@ -23,6 +23,7 @@ import (
"strings"
"time"
"github.com/blang/semver"
"github.com/pkg/errors"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/assets"
@ -56,9 +57,12 @@ func (e *ErrISOFeature) Error() string {
// Docker contains Docker runtime state
type Docker struct {
Socket string
Runner CommandRunner
Init sysinit.Manager
Socket string
Runner CommandRunner
ImageRepository string
KubernetesVersion semver.Version
Init sysinit.Manager
UseCRI bool
}
// Name is a human readable name for Docker
@ -181,6 +185,14 @@ func (r *Docker) CGroupDriver() (string, error) {
// KubeletOptions returns kubelet options for a runtime.
func (r *Docker) KubeletOptions() map[string]string {
if r.UseCRI {
return map[string]string{
"container-runtime": "remote",
"container-runtime-endpoint": r.SocketPath(),
"image-service-endpoint": r.SocketPath(),
"runtime-request-timeout": "15m",
}
}
return map[string]string{
"container-runtime": "docker",
}
@ -188,6 +200,9 @@ func (r *Docker) KubeletOptions() map[string]string {
// ListContainers returns a list of containers
func (r *Docker) ListContainers(o ListOptions) ([]string, error) {
if r.UseCRI {
return listCRIContainers(r.Runner, "", o)
}
args := []string{"ps"}
switch o.State {
case All:
@ -220,6 +235,9 @@ func (r *Docker) ListContainers(o ListOptions) ([]string, error) {
// KillContainers forcibly removes a running container based on ID
func (r *Docker) KillContainers(ids []string) error {
if r.UseCRI {
return killCRIContainers(r.Runner, ids)
}
if len(ids) == 0 {
return nil
}
@ -234,6 +252,9 @@ func (r *Docker) KillContainers(ids []string) error {
// StopContainers stops a running container based on ID
func (r *Docker) StopContainers(ids []string) error {
if r.UseCRI {
return stopCRIContainers(r.Runner, ids)
}
if len(ids) == 0 {
return nil
}
@ -248,6 +269,9 @@ func (r *Docker) StopContainers(ids []string) error {
// PauseContainers pauses a running container based on ID
func (r *Docker) PauseContainers(ids []string) error {
if r.UseCRI {
return pauseCRIContainers(r.Runner, "", ids)
}
if len(ids) == 0 {
return nil
}
@ -262,6 +286,9 @@ func (r *Docker) PauseContainers(ids []string) error {
// UnpauseContainers unpauses a container based on ID
func (r *Docker) UnpauseContainers(ids []string) error {
if r.UseCRI {
return unpauseCRIContainers(r.Runner, "", ids)
}
if len(ids) == 0 {
return nil
}
@ -276,6 +303,9 @@ func (r *Docker) UnpauseContainers(ids []string) error {
// ContainerLogCmd returns the command to retrieve the log for a container based on ID
func (r *Docker) ContainerLogCmd(id string, len int, follow bool) string {
if r.UseCRI {
return criContainerLogCmd(r.Runner, id, len, follow)
}
var cmd strings.Builder
cmd.WriteString("docker logs ")
if len > 0 {

View File

@ -16,18 +16,30 @@ limitations under the License.
package driver
import "os/exec"
import (
"os/exec"
"runtime"
)
// supportedDrivers is a list of supported drivers on Darwin.
var supportedDrivers = []string{
VirtualBox,
Parallels,
VMwareFusion,
HyperKit,
VMware,
Docker,
SSH,
}
var supportedDrivers []string = func() []string {
if runtime.GOARCH == "arm64" {
// on darwin/arm64 only the docker and ssh drivers are supported so far
return []string{
Docker,
SSH,
}
}
return []string{
VirtualBox,
Parallels,
VMwareFusion,
HyperKit,
VMware,
Docker,
SSH,
}
}()
func VBoxManagePath() string {
cmd := "VBoxManage"

View File

@ -29,6 +29,7 @@ import (
"github.com/pkg/errors"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/audit"
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
@ -188,12 +189,29 @@ func Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.Cluster
}
}
if err := outputAudit(lines); err != nil {
klog.Error(err)
failed = append(failed, "audit")
}
if len(failed) > 0 {
return fmt.Errorf("unable to fetch logs for: %s", strings.Join(failed, ", "))
}
return nil
}
// outputAudit displays the audit logs.
func outputAudit(lines int) error {
out.Step(style.Empty, "")
out.Step(style.Empty, "==> Audit <==")
r, err := audit.Report(lines)
if err != nil {
return fmt.Errorf("failed to create audit report: %v", err)
}
out.Step(style.Empty, r.ASCIITable())
return nil
}
// logCommands returns a list of commands that would be run to receive the anticipated logs
func logCommands(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, length int, follow bool) map[string]string {
cmds := bs.LogCommands(cfg, bootstrapper.LogOptions{Lines: length, Follow: follow})

View File

@ -298,7 +298,7 @@ func postStartSetup(h *host.Host, mc config.ClusterConfig) error {
// acquireMachinesLock protects against code that is not parallel-safe (libmachine, cert setup)
func acquireMachinesLock(name string, drv string) (mutex.Releaser, error) {
lockPath := filepath.Join(localpath.MiniPath(), "machines", drv)
// "With KIC, it's safe to provision multiple hosts simultaneously"
// With KIC, it's safe to provision multiple hosts simultaneously
if driver.IsKIC(drv) {
lockPath = filepath.Join(localpath.MiniPath(), "machines", drv, name)
}

View File

@ -245,6 +245,7 @@ func Provision(cc *config.ClusterConfig, n *config.Node, apiServer bool, delOnFa
func configureRuntimes(runner cruntime.CommandRunner, cc config.ClusterConfig, kv semver.Version) cruntime.Manager {
co := cruntime.Config{
Type: cc.KubernetesConfig.ContainerRuntime,
Socket: cc.KubernetesConfig.CRISocket,
Runner: runner,
ImageRepository: cc.KubernetesConfig.ImageRepository,
KubernetesVersion: kv,

View File

@ -232,7 +232,7 @@ func suggestFix(src string, exitcode int, stderr string, err error) registry.Sta
return registry.State{Reason: "PROVIDER_DOCKER_NEWGRP", Error: err, Installed: true, Running: true, Healthy: false, Fix: "Add your user to the 'docker' group: 'sudo usermod -aG docker $USER && newgrp docker'", Doc: "https://docs.docker.com/engine/install/linux-postinstall/"}
}
if strings.Contains(stderr, "/pipe/docker_engine: The system cannot find the file specified.") && runtime.GOOS == "windows" {
if strings.Contains(stderr, "pipe.*docker_engine.*: The system cannot find the file specified.") && runtime.GOOS == "windows" {
return registry.State{Reason: "PROVIDER_DOCKER_PIPE_NOT_FOUND", Error: err, Installed: true, Running: false, Healthy: false, Fix: "Start the Docker service. If Docker is already running, you may need to reset Docker to factory settings with: Settings > Reset.", Doc: "https://github.com/docker/for-win/issues/1825#issuecomment-450501157"}
}

View File

@ -86,8 +86,10 @@ func TestCheckDockerVersion(t *testing.T) {
for _, c := range tc {
t.Run("checkDockerVersion test", func(t *testing.T) {
s := checkDockerVersion(c.version)
if c.expect != s.Reason {
t.Errorf("Error %v expected. but got %q. (version string : %s)", c.expect, s.Reason, c.version)
if s.Error != nil {
if c.expect != s.Reason {
t.Errorf("Error %v expected. but got %q. (version string : %s)", c.expect, s.Reason, c.version)
}
}
})
}

210
pkg/network/network.go Normal file
View File

@ -0,0 +1,210 @@
/*
Copyright 2021 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
"encoding/binary"
"fmt"
"net"
"github.com/pkg/errors"
"k8s.io/klog/v2"
)
var (
// valid private network subnets (RFC1918)
privateSubnets = []net.IPNet{
// 10.0.0.0/8
{
IP: []byte{10, 0, 0, 0},
Mask: []byte{255, 0, 0, 0},
},
// 172.16.0.0/12
{
IP: []byte{172, 16, 0, 0},
Mask: []byte{255, 240, 0, 0},
},
// 192.168.0.0/16
{
IP: []byte{192, 168, 0, 0},
Mask: []byte{255, 255, 0, 0},
},
}
)
// Parameters contains main network parameters.
type Parameters struct {
IP string // IP address of the network
Netmask string // form: 4-byte ('a.b.c.d')
CIDR string // form: CIDR
Gateway string // first IP address (assumed, not checked !)
ClientMin string // second IP address
ClientMax string // last IP address before broadcast
Broadcast string // last IP address
Interface
}
// Interface contains main network interface parameters.
type Interface struct {
IfaceName string
IfaceIPv4 string
IfaceMTU int
IfaceMAC string
}
// inspect initialises the IPv4 network parameters struct from the given address.
// address can be a single address (like "192.168.17.42"), a network address (like "192.168.17.0"), or in CIDR form (like "192.168.17.42/24" or "192.168.17.0/24").
// If addr is a valid existing interface address, the network struct will also contain info about the respective interface.
func inspect(addr string) (*Parameters, error) {
n := &Parameters{}
// extract ip from addr
ip, network, err := net.ParseCIDR(addr)
if err != nil {
ip = net.ParseIP(addr)
if ip == nil {
return nil, errors.Wrapf(err, "parsing address %q", addr)
}
}
// check local interfaces
ifaces, _ := net.Interfaces()
for _, iface := range ifaces {
ifAddrs, err := iface.Addrs()
if err != nil {
return nil, errors.Wrapf(err, "listing addresses of network interface %+v", iface)
}
for _, ifAddr := range ifAddrs {
ifip, lan, err := net.ParseCIDR(ifAddr.String())
if err != nil {
return nil, errors.Wrapf(err, "parsing address of network iface %+v", ifAddr)
}
if lan.Contains(ip) {
n.IfaceName = iface.Name
n.IfaceIPv4 = ifip.To4().String()
n.IfaceMTU = iface.MTU
n.IfaceMAC = iface.HardwareAddr.String()
n.Gateway = n.IfaceIPv4
network = lan
break
}
}
}
if network == nil {
ipnet := &net.IPNet{
IP: ip,
Mask: ip.DefaultMask(), // assume default network mask
}
_, network, err = net.ParseCIDR(ipnet.String())
if err != nil {
return nil, errors.Wrapf(err, "determining network address from %q", addr)
}
}
n.IP = network.IP.String()
n.Netmask = net.IP(network.Mask).String() // form: 4-byte ('a.b.c.d')
n.CIDR = network.String()
networkIP := binary.BigEndian.Uint32(network.IP) // IP address of the network
networkMask := binary.BigEndian.Uint32(network.Mask) // network mask
broadcastIP := (networkIP & networkMask) | (networkMask ^ 0xffffffff) // last network IP address
broadcast := make(net.IP, 4)
binary.BigEndian.PutUint32(broadcast, broadcastIP)
n.Broadcast = broadcast.String()
gateway := net.ParseIP(n.Gateway).To4() // has to be converted to 4-byte representation!
if gateway == nil {
gateway = make(net.IP, 4)
binary.BigEndian.PutUint32(gateway, networkIP+1) // assume first network IP address
n.Gateway = gateway.String()
}
gatewayIP := binary.BigEndian.Uint32(gateway)
min := make(net.IP, 4)
binary.BigEndian.PutUint32(min, gatewayIP+1) // clients-from: first network IP address after gateway
n.ClientMin = min.String()
max := make(net.IP, 4)
binary.BigEndian.PutUint32(max, broadcastIP-1) // clients-to: last network IP address before broadcast
n.ClientMax = max.String()
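// e.g. for 192.168.39.0/24 (with no matching local interface): gateway 192.168.39.1,
// clients 192.168.39.2-192.168.39.254, broadcast 192.168.39.255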
return n, nil
}
// isSubnetTaken returns whether the local network subnet is already in use, and any error that occurred.
// It will return false in case of an error.
func isSubnetTaken(subnet string) (bool, error) {
ips, err := net.InterfaceAddrs()
if err != nil {
return false, errors.Wrap(err, "listing local networks")
}
for _, ip := range ips {
_, lan, err := net.ParseCIDR(ip.String())
if err != nil {
return false, errors.Wrapf(err, "parsing network iface address %q", ip)
}
if lan.Contains(net.ParseIP(subnet)) {
return true, nil
}
}
return false, nil
}
// isSubnetPrivate returns whether the subnet is a private network.
func isSubnetPrivate(subnet string) bool {
for _, ipnet := range privateSubnets {
if ipnet.Contains(net.ParseIP(subnet)) {
return true
}
}
return false
}
// FreeSubnet tries to find a free private network, beginning with startSubnet and incrementing it by step, for up to the given number of tries.
func FreeSubnet(startSubnet string, step, tries int) (*Parameters, error) {
for try := 0; try < tries; try++ {
n, err := inspect(startSubnet)
if err != nil {
return nil, err
}
startSubnet = n.IP
if isSubnetPrivate(startSubnet) {
taken, err := isSubnetTaken(startSubnet)
if err != nil {
return nil, err
}
if !taken {
klog.Infof("using free private subnet %s: %+v", n.CIDR, n)
return n, nil
}
klog.Infof("skipping subnet %s that is taken: %+v", n.CIDR, n)
} else {
klog.Infof("skipping subnet %s that is not private", n.CIDR)
}
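// Step to the next candidate subnet: bump the second octet when the default mask is /16 or wider,
// otherwise the third octet. For example (illustrative values): starting from 192.168.49.0
// (default /24 mask) with step 9, the next candidate is 192.168.58.0.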
ones, _ := net.ParseIP(n.IP).DefaultMask().Size()
nextSubnet := net.ParseIP(startSubnet).To4()
if ones <= 16 {
nextSubnet[1] += byte(step)
} else {
nextSubnet[2] += byte(step)
}
startSubnet = nextSubnet.String()
}
return nil, fmt.Errorf("no free private network subnets found with given parameters (start: %q, step: %d, tries: %d)", startSubnet, step, tries)
}
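A minimal usage sketch for the helpers above, assuming they are exposed from the k8s.io/minikube/pkg/network package (the import path, starting subnet, step, and try count here are illustrative assumptions, not taken from this change):

```go
package main

import (
	"fmt"

	"k8s.io/minikube/pkg/network" // assumed import path for the package shown above
)

func main() {
	// Look for a free private subnet starting at 192.168.49.0, stepping the
	// third octet by 9, for at most 13 attempts (illustrative parameters).
	n, err := network.FreeSubnet("192.168.49.0", 9, 13)
	if err != nil {
		fmt.Println("no free private subnet found:", err)
		return
	}
	fmt.Println("CIDR:      ", n.CIDR)      // e.g. 192.168.49.0/24
	fmt.Println("gateway:   ", n.Gateway)   // e.g. 192.168.49.1 (first address)
	fmt.Println("client min:", n.ClientMin) // e.g. 192.168.49.2 (first address after gateway)
	fmt.Println("client max:", n.ClientMax) // e.g. 192.168.49.254 (last address before broadcast)
	fmt.Println("broadcast: ", n.Broadcast) // e.g. 192.168.49.255 (last address)
}
```

Note that isSubnetTaken only consults addresses bound to local interfaces, so FreeSubnet guards against locally visible collisions rather than probing routes or remote networks.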

View File

@ -26,7 +26,7 @@ minikube start [flags]
--apiserver-names strings A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine
--apiserver-port int The apiserver listening port (default 8443)
--auto-update-drivers If set, automatically updates drivers to the latest version. Defaults to true. (default true)
--base-image string The base image to use for docker/podman drivers. Intended for local development. (default "gcr.io/k8s-minikube/kicbase:v0.0.17@sha256:1cd2e039ec9d418e6380b2fa0280503a72e5b282adea674ee67882f59f4f546e")
--base-image string The base image to use for docker/podman drivers. Intended for local development. (default "gcr.io/k8s-minikube/kicbase-builds:v0.0.17-1613704090-10418@sha256:876f620cdc40b4616e4e11db64524c520e252ede006357eaa963488ae852b6ed")
--cache-images If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none. (default true)
--cni string CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)
--container-runtime string The container runtime to be used (docker, cri-o, containerd). (default "docker")

View File

@ -8,7 +8,9 @@ description: >
---
Community triage takes place **every Wednesday** from **11AM-12PM PST**.
Hangouts link: https://meet.google.com/ikf-fvrs-eer
- Hangouts link: https://meet.google.com/ikf-fvrs-eer
- Google Group: https://groups.google.com/forum/#!forum/minikube-dev
All community members are welcome and encouraged to join and help us triage minikube!

View File

@ -25,7 +25,7 @@ A NodePort service is the most basic way to get external traffic directly to you
We also have a shortcut for fetching the minikube IP and a service's `NodePort`:
```shell
minikube service --url $SERVICE
minikube service --url <service-name>
```
## Getting the NodePort using kubectl
@ -35,7 +35,7 @@ The minikube VM is exposed to the host system via a host-only IP address, that c
To determine the NodePort for your service, you can use a `kubectl` command like this (note that `nodePort` begins with lowercase `n` in JSON output):
```shell
kubectl get service $SERVICE --output='jsonpath="{.spec.ports[0].nodePort}"'
kubectl get service <service-name> --output='jsonpath="{.spec.ports[0].nodePort}"'
```
### Increasing the NodePort range

View File

@ -59,7 +59,7 @@ func TestAddons(t *testing.T) {
}
args := append([]string{"start", "-p", profile, "--wait=true", "--memory=4000", "--alsologtostderr", "--addons=registry", "--addons=metrics-server", "--addons=olm", "--addons=volumesnapshots", "--addons=csi-hostpath-driver", "--addons=gcp-auth"}, StartArgs()...)
if !NoneDriver() && !(runtime.GOOS == "darwin" && KicDriver()) { // none doesn't support ingress
if !(runtime.GOOS == "darwin" && KicDriver()) { // macos docker driver does not support ingress
args = append(args, "--addons=ingress")
}
if !arm64Platform() {
@ -114,8 +114,8 @@ func TestAddons(t *testing.T) {
func validateIngressAddon(ctx context.Context, t *testing.T, profile string) {
defer PostMortemLogs(t, profile)
if NoneDriver() || (runtime.GOOS == "darwin" && KicDriver()) {
t.Skipf("skipping: ssh unsupported by none")
if runtime.GOOS == "darwin" && KicDriver() {
t.Skipf("skipping: ingress not supported on macOS docker driver")
}
client, err := kapi.Client(profile)
@ -136,7 +136,7 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) {
return err
}
if rr.Stderr.String() != "" {
t.Logf("%v: unexpected stderr: %s (may be temproary)", rr.Command(), rr.Stderr)
t.Logf("%v: unexpected stderr: %s (may be temporary)", rr.Command(), rr.Stderr)
}
return nil
}
@ -160,9 +160,18 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) {
want := "Welcome to nginx!"
addr := "http://127.0.0.1/"
checkIngress := func() error {
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("curl -s %s -H 'Host: nginx.example.com'", addr)))
if err != nil {
return err
var rr *RunResult
var err error
if NoneDriver() { // just run curl directly on the none driver
rr, err = Run(t, exec.CommandContext(ctx, "curl", "-s", addr, "-H", "'Host: nginx.example.com'"))
if err != nil {
return err
}
} else {
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("curl -s %s -H 'Host: nginx.example.com'", addr)))
if err != nil {
return err
}
}
stderr := rr.Stderr.String()

View File

@ -19,6 +19,7 @@ limitations under the License.
package integration
import (
"bufio"
"bytes"
"context"
"encoding/json"
@ -256,7 +257,6 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) {
if !strings.Contains(rr.Output(), expectedImgInside) {
t.Fatalf("expected 'docker images' to have %q inside minikube. but the output is: *%s*", expectedImgInside, rr.Output())
}
}
func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) {
@ -269,7 +269,7 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) {
// Use more memory so that we may reliably fit MySQL and nginx
// changing api server so later in soft start we verify it didn't change
startArgs := append([]string{"start", "-p", profile, "--memory=4000", fmt.Sprintf("--apiserver-port=%d", apiPortTest), "--wait=true"}, StartArgs()...)
startArgs := append([]string{"start", "-p", profile, "--memory=4000", fmt.Sprintf("--apiserver-port=%d", apiPortTest), "--wait=all"}, StartArgs()...)
c := exec.CommandContext(ctx, Target(), startArgs...)
env := os.Environ()
env = append(env, fmt.Sprintf("HTTP_PROXY=%s", srv.Addr))
@ -401,7 +401,6 @@ func validateMinikubeKubectlDirectCall(ctx context.Context, t *testing.T, profil
if err != nil {
t.Fatalf("failed to run kubectl directly. args %q: %v", rr.Command(), err)
}
}
func validateExtraConfig(ctx context.Context, t *testing.T, profile string) {
@ -409,7 +408,7 @@ func validateExtraConfig(ctx context.Context, t *testing.T, profile string) {
start := time.Now()
// The tests before this already created a profile, starting minikube with different --extra-config cmdline option.
startArgs := []string{"start", "-p", profile, "--extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision"}
startArgs := []string{"start", "-p", profile, "--extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision", "--wait=all"}
c := exec.CommandContext(ctx, Target(), startArgs...)
rr, err := Run(t, c)
if err != nil {
@ -427,7 +426,6 @@ func validateExtraConfig(ctx context.Context, t *testing.T, profile string) {
if !strings.Contains(afterCfg.Config.KubernetesConfig.ExtraOptions.String(), expectedExtraOptions) {
t.Errorf("expected ExtraOptions to contain %s but got %s", expectedExtraOptions, afterCfg.Config.KubernetesConfig.ExtraOptions.String())
}
}
// imageID returns a docker image id for image `image` and current architecture
@ -451,6 +449,7 @@ func imageID(image string) string {
}
// validateComponentHealth asserts that all Kubernetes components are healthy
// note: it expects all components to be Ready, so it makes sense to run it only right after tests that include the '--wait=all' start flag (i.e., with extra wait)
func validateComponentHealth(ctx context.Context, t *testing.T, profile string) {
defer PostMortemLogs(t, profile)
@ -474,12 +473,22 @@ func validateComponentHealth(ctx context.Context, t *testing.T, profile string)
for _, i := range cs.Items {
for _, l := range i.Labels {
t.Logf("%s phase: %s", l, i.Status.Phase)
_, ok := found[l]
if ok {
if _, ok := found[l]; ok { // skip irrelevant (eg, repeating/redundant '"tier": "control-plane"') labels
found[l] = true
if i.Status.Phase != "Running" {
t.Logf("%s phase: %s", l, i.Status.Phase)
if i.Status.Phase != api.PodRunning {
t.Errorf("%s is not Running: %+v", l, i.Status)
continue
}
for _, c := range i.Status.Conditions {
if c.Type == api.PodReady {
if c.Status != api.ConditionTrue {
t.Errorf("%s is not Ready: %+v", l, i.Status)
} else {
t.Logf("%s status: %s", l, c.Type)
}
break
}
}
}
}
@ -538,8 +547,11 @@ func validateStatusCmd(ctx context.Context, t *testing.T, profile string) {
func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) {
defer PostMortemLogs(t, profile)
mctx, cancel := context.WithTimeout(ctx, Seconds(300))
defer cancel()
args := []string{"dashboard", "--url", "-p", profile, "--alsologtostderr", "-v=1"}
ss, err := Start(t, exec.CommandContext(ctx, Target(), args...))
ss, err := Start(t, exec.CommandContext(mctx, Target(), args...))
if err != nil {
t.Errorf("failed to run minikube dashboard. args %q : %v", args, err)
}
@ -547,13 +559,12 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) {
ss.Stop(t)
}()
start := time.Now()
s, err := ReadLineWithTimeout(ss.Stdout, Seconds(300))
s, err := dashboardURL(ss.Stdout)
if err != nil {
if runtime.GOOS == "windows" {
t.Skipf("failed to read url within %s: %v\noutput: %q\n", time.Since(start), err, s)
t.Skip(err)
}
t.Fatalf("failed to read url within %s: %v\noutput: %q\n", time.Since(start), err, s)
t.Fatal(err)
}
u, err := url.Parse(strings.TrimSpace(s))
@ -575,6 +586,24 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) {
}
}
// dashboardURL gets the dashboard URL from the command stdout.
func dashboardURL(b *bufio.Reader) (string, error) {
// match http://127.0.0.1:XXXXX/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/
dashURLRegexp := regexp.MustCompile(`^http:\/\/127\.0\.0\.1:[0-9]{5}\/api\/v1\/namespaces\/kubernetes-dashboard\/services\/http:kubernetes-dashboard:\/proxy\/$`)
s := bufio.NewScanner(b)
for s.Scan() {
t := s.Text()
if dashURLRegexp.MatchString(t) {
return t, nil
}
}
if err := s.Err(); err != nil {
return "", fmt.Errorf("failed reading input: %v", err)
}
return "", fmt.Errorf("output didn't produce a URL")
}
// validateDryRun asserts that the dry-run mode quickly exits with the right code
func validateDryRun(ctx context.Context, t *testing.T, profile string) {
// dry-run mode should always be able to finish quickly (<5s)
@ -768,7 +797,7 @@ func validateLogsCmd(ctx context.Context, t *testing.T, profile string) {
if err != nil {
t.Errorf("%s failed: %v", rr.Command(), err)
}
expectedWords := []string{"apiserver", "Linux", "kubelet"}
expectedWords := []string{"apiserver", "Linux", "kubelet", "Audit"}
switch ContainerRuntime() {
case "docker":
expectedWords = append(expectedWords, "Docker")

View File

@ -155,7 +155,7 @@ func TestErrorJSONOutput(t *testing.T) {
t.Fatalf("last cloud event is not of type error: %v", last)
}
last.validateData(t, "exitcode", fmt.Sprintf("%v", reason.ExDriverUnsupported))
last.validateData(t, "message", fmt.Sprintf("The driver 'fail' is not supported on %s", runtime.GOOS))
last.validateData(t, "message", fmt.Sprintf("The driver 'fail' is not supported on %s/%s", runtime.GOOS, runtime.GOARCH))
}
type cloudEvent struct {

View File

@ -95,7 +95,7 @@ func TestSkaffold(t *testing.T) {
}()
// make sure "skaffold run" exits without failure
cmd := exec.CommandContext(ctx, tf.Name(), "run", "--minikube-profile", profile, "--kube-context", profile, "--status-check=true", "--port-forward=false")
cmd := exec.CommandContext(ctx, tf.Name(), "run", "--minikube-profile", profile, "--kube-context", profile, "--status-check=true", "--port-forward=false", "--interactive=false")
cmd.Dir = "testdata/skaffold"
rr, err = Run(t, cmd)
if err != nil {

View File

@ -79,7 +79,7 @@ func TestStartStop(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
MaybeParallel(t)
profile := UniqueProfileName(tc.name)
ctx, cancel := context.WithTimeout(context.Background(), Minutes(40))
ctx, cancel := context.WithTimeout(context.Background(), Minutes(30))
defer Cleanup(t, profile, cancel)
type validateStartStopFunc func(context.Context, *testing.T, string, string, string, []string)
if !strings.Contains(tc.name, "docker") && NoneDriver() {

View File

@ -26,31 +26,6 @@ import (
"k8s.io/minikube/pkg/minikube/localpath"
)
// ReadLineWithTimeout reads a line of text from a buffer with a timeout
func ReadLineWithTimeout(b *bufio.Reader, timeout time.Duration) (string, error) {
s := make(chan string)
e := make(chan error)
go func() {
read, err := b.ReadString('\n')
if err != nil {
e <- err
} else {
s <- read
}
close(s)
close(e)
}()
select {
case line := <-s:
return line, nil
case err := <-e:
return "", err
case <-time.After(timeout):
return "", fmt.Errorf("timeout after %s", timeout)
}
}
// UniqueProfileName returns a reasonably unique profile name
func UniqueProfileName(prefix string) string {
if *forceProfile != "" {

View File

@ -196,6 +196,14 @@ func TestStoppedBinaryUpgrade(t *testing.T) {
if err != nil {
t.Fatalf("upgrade from %s to HEAD failed: %s: %v", legacyVersion, rr.Command(), err)
}
t.Run("MinikubeLogs", func(t *testing.T) {
args := []string{"logs", "-p", profile}
rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Fatalf("`minikube logs` after upgrade to HEAD from %s failed: %v", legacyVersion, err)
}
})
}
// TestKubernetesUpgrade upgrades Kubernetes from oldest to newest

View File

@ -510,7 +510,7 @@
"The cri socket path to be used.": "",
"The docker-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "",
"The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "",
"The driver '{{.driver}}' is not supported on {{.os}}": "Der Treiber '{{.driver}}' wird auf {{.os}} nicht unterstützt",
"The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}": "Der Treiber '{{.driver}}' wird auf {{.os}}/{{.arch}} nicht unterstützt",
"The existing \"{{.name}}\" cluster was created using the \"{{.old}}\" driver, which is incompatible with requested \"{{.new}}\" driver.": "",
"The existing node configuration appears to be corrupt. Run 'minikube delete'": "",
"The heapster addon is depreciated. please try to disable metrics-server instead": "",

View File

@ -511,7 +511,7 @@
"The cri socket path to be used.": "",
"The docker-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "",
"The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "",
"The driver '{{.driver}}' is not supported on {{.os}}": "El controlador \"{{.driver}}\" no se puede utilizar en {{.os}}",
"The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}": "El controlador \"{{.driver}}\" no se puede utilizar en {{.os}}/{{.arch}}",
"The existing \"{{.name}}\" cluster was created using the \"{{.old}}\" driver, which is incompatible with requested \"{{.new}}\" driver.": "",
"The existing node configuration appears to be corrupt. Run 'minikube delete'": "",
"The heapster addon is depreciated. please try to disable metrics-server instead": "",

View File

@ -513,7 +513,7 @@
"The cri socket path to be used.": "",
"The docker-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "",
"The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "",
"The driver '{{.driver}}' is not supported on {{.os}}": "Le pilote \"{{.driver}}\" n'est pas compatible avec {{.os}}.",
"The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}": "Le pilote \"{{.driver}}\" n'est pas compatible avec {{.os}}/{{.arch}}.",
"The existing \"{{.name}}\" cluster was created using the \"{{.old}}\" driver, which is incompatible with requested \"{{.new}}\" driver.": "",
"The existing node configuration appears to be corrupt. Run 'minikube delete'": "",
"The heapster addon is depreciated. please try to disable metrics-server instead": "",

View File

@ -507,7 +507,7 @@
"The cri socket path to be used.": "",
"The docker-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "",
"The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "",
"The driver '{{.driver}}' is not supported on {{.os}}": "ドライバ「{{.driver}}」は、{{.os}} ではサポートされていません",
"The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}": "ドライバ「{{.driver}}」は、{{.os}}/{{.arch}} ではサポートされていません",
"The existing \"{{.name}}\" cluster was created using the \"{{.old}}\" driver, which is incompatible with requested \"{{.new}}\" driver.": "",
"The existing node configuration appears to be corrupt. Run 'minikube delete'": "",
"The heapster addon is depreciated. please try to disable metrics-server instead": "",

View File

@ -480,7 +480,7 @@
"The cri socket path to be used.": "",
"The docker-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "",
"The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "",
"The driver '{{.driver}}' is not supported on {{.os}}": "",
"The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}": "",
"The existing \"{{.name}}\" cluster was created using the \"{{.old}}\" driver, which is incompatible with requested \"{{.new}}\" driver.": "",
"The heapster addon is depreciated. please try to disable metrics-server instead": "",
"The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "",

View File

@ -524,7 +524,7 @@
"The docker service is currently not active": "Serwis docker jest nieaktywny",
"The docker-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "",
"The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "",
"The driver '{{.driver}}' is not supported on {{.os}}": "Sterownik '{{.driver}} jest niewspierany przez system {{.os}}",
"The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}": "Sterownik '{{.driver}} jest niewspierany przez system {{.os}}/{{.arch}}",
"The existing \"{{.name}}\" cluster was created using the \"{{.old}}\" driver, which is incompatible with requested \"{{.new}}\" driver.": "",
"The existing node configuration appears to be corrupt. Run 'minikube delete'": "",
"The heapster addon is depreciated. please try to disable metrics-server instead": "",

View File

@ -426,7 +426,7 @@
"The cri socket path to be used.": "",
"The docker-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "",
"The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "",
"The driver '{{.driver}}' is not supported on {{.os}}": "",
"The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}": "",
"The existing \"{{.name}}\" cluster was created using the \"{{.old}}\" driver, which is incompatible with requested \"{{.new}}\" driver.": "",
"The heapster addon is depreciated. please try to disable metrics-server instead": "",
"The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "",

View File

@ -613,7 +613,7 @@
"The cri socket path to be used.": "",
"The docker-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "",
"The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "",
"The driver '{{.driver}}' is not supported on {{.os}}": "{{.os}} 不支持驱动程序“{{.driver}}”",
"The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}": "{{.os}} 不支持驱动程序“{{.driver}}/{{.arch}}”",
"The existing \"{{.name}}\" cluster was created using the \"{{.old}}\" driver, which is incompatible with requested \"{{.new}}\" driver.": "",
"The existing node configuration appears to be corrupt. Run 'minikube delete'": "",
"The heapster addon is depreciated. please try to disable metrics-server instead": "",