Merge branch 'master' of https://github.com/kubernetes/minikube into master_srikrishnabh

pull/12084/head
srikrishnabh93@gmail.com 2021-08-01 13:01:54 +05:30
commit efbfbc1f21
540 changed files with 59520 additions and 4694 deletions

View File

@ -1,6 +1,7 @@
---
name: فارسی
about: مشكلی را گزارش كن
labels: l/fa
---
<!-- لطفا از این قالب برای فرستادن گزارش استفاده کنید، هرچه می توانید اطلاعات بیشتری به ما بدهید. در غیر اینصورت با تاخیر بیشتری جواب خواهید گرفت. تشکر. -->

View File

@ -1,6 +1,7 @@
---
name: Kurdî
about: Girêftek gozarîş bike
labels: l/ku
---
<!-- Tikaye lem qalîbe bo dorust kirdinî gozarîş kelk wer bigre, ta ewendey detwani zanyari zortirman pê bide. bo xêratirîn willam. gelêk sipas! -->

View File

@ -1,5 +1,6 @@
name: build
on:
workflow_dispatch:
push:
branches:
- master
@ -11,7 +12,7 @@ on:
- "!deploy/iso/**"
env:
GOPROXY: https://proxy.golang.org
GO_VERSION: 1.16.1
GO_VERSION: '1.16.6'
jobs:
build_minikube:
runs-on: ubuntu-18.04

42
.github/workflows/docs.yml vendored Normal file
View File

@ -0,0 +1,42 @@
# Regenerates auto-generated docs and translations on every push to master
# (or on manual dispatch) and opens a PR from the minikube-bot fork when
# anything actually changed.
name: "generate-docs"
on:
  workflow_dispatch:
  push:
    branches:
      - master
env:
  GOPROXY: https://proxy.golang.org
  # Quoted so YAML does not parse the version as a float.
  GO_VERSION: '1.16.6'
jobs:
  generate-docs:
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: ${{env.GO_VERSION}}
          stable: true
      - name: Generate Docs
        id: gendocs
        run: |
          make generate-docs
          # Publish the dirty-tree status so the next step can decide
          # whether a PR is needed at all.
          echo "::set-output name=changes::$(git status --porcelain)"
      - name: Create PR
        # Skip entirely when `make generate-docs` produced no changes.
        if: ${{ steps.gendocs.outputs.changes != '' }}
        uses: peter-evans/create-pull-request@v3
        with:
          token: ${{ secrets.MINIKUBE_BOT_PAT }}
          commit-message: Update auto-generated docs and translations
          committer: minikube-bot <minikube-bot@google.com>
          author: minikube-bot <minikube-bot@google.com>
          branch: gendocs
          push-to-fork: minikube-bot/minikube
          base: master
          delete-branch: true
          title: 'Update auto-generated docs and translations'
          body: |
            Committing changes resulting from `make generate-docs`.
            This PR is auto-generated by the [gendocs](https://github.com/kubernetes/minikube/blob/master/.github/workflows/docs.yml) CI workflow.
            ```
            ${{ steps.gendocs.outputs.changes }}
            ```

45
.github/workflows/leaderboard.yml vendored Normal file
View File

@ -0,0 +1,45 @@
# Refreshes the contributions leaderboard whenever a non-beta tag is pushed
# or a release is published, then opens a PR with the regenerated data.
name: "update-leaderboard"
on:
  workflow_dispatch:
  push:
    tags-ignore:
      # Beta pre-release tags do not get a leaderboard refresh.
      - 'v*-beta.*'
  release:
    types: [published]
env:
  # Quoted so YAML does not parse the version as a float.
  GO_VERSION: '1.16.6'
jobs:
  update-leaderboard:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: ${{env.GO_VERSION}}
          stable: true
      - name: Update Leaderboard
        id: leaderboard
        run: |
          make update-leaderboard
          # Publish the dirty-tree status so the next step can decide
          # whether a PR is needed at all.
          echo "::set-output name=changes::$(git status --porcelain)"
        env:
          # The leaderboard generator queries the GitHub API.
          GITHUB_TOKEN: ${{ secrets.MINIKUBE_BOT_PAT }}
      - name: Create PR
        # Skip entirely when `make update-leaderboard` produced no changes.
        if: ${{ steps.leaderboard.outputs.changes != '' }}
        uses: peter-evans/create-pull-request@v3
        with:
          token: ${{ secrets.MINIKUBE_BOT_PAT }}
          commit-message: Update leaderboard
          committer: minikube-bot <minikube-bot@google.com>
          author: minikube-bot <minikube-bot@google.com>
          branch: leaderboard
          push-to-fork: minikube-bot/minikube
          base: master
          delete-branch: true
          title: 'Update leaderboard'
          body: |
            Committing changes resulting from `make update-leaderboard`.
            This PR is auto-generated by the [update-leaderboard](https://github.com/kubernetes/minikube/blob/master/.github/workflows/leaderboard.yml) CI workflow.
            ```
            ${{ steps.leaderboard.outputs.changes }}
            ```

View File

@ -1,5 +1,6 @@
name: Master
on:
workflow_dispatch:
push:
branches:
- master
@ -13,7 +14,7 @@ on:
- "!deploy/iso/**"
env:
GOPROXY: https://proxy.golang.org
GO_VERSION: 1.16.1
GO_VERSION: '1.16.6'
jobs:
# Runs before all other jobs
# builds the minikube binaries
@ -122,7 +123,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -221,7 +222,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -323,7 +324,7 @@ jobs:
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -409,7 +410,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Install docker
shell: bash
@ -555,7 +556,7 @@ jobs:
continue-on-error: true
shell: powershell
run: |
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
choco install -y kubernetes-cli
choco install -y jq
choco install -y caffeine
@ -693,7 +694,7 @@ jobs:
shell: powershell
run: |
$ErrorActionPreference = "SilentlyContinue"
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
choco install -y kubernetes-cli
choco install -y jq
choco install -y caffeine
@ -798,7 +799,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -866,6 +867,28 @@ jobs:
GOPOGH_RESULT: ""
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
steps:
- name: Install tools
shell: bash
run: |
sudo apt update
sudo apt install -y jq docker git cron
sudo usermod -aG docker $USER
- name: Init
shell: bash
run: |
if [[ -f /var/run/reboot.in.progress ]]; then
echo "reboot in progress"
exit 1
fi
sudo touch /var/run/job.in.progress
rm -rf cleanup.sh install_cleanup.sh
# after this PR is merged, update URLs to get the scripts from github master
wget https://storage.googleapis.com/minikube-ci-utils/cleanup.sh
wget https://storage.googleapis.com/minikube-ci-utils/install_cleanup.sh
chmod +x cleanup.sh install_cleanup.sh
./install_cleanup.sh
- name: Install kubectl
shell: bash
run: |
@ -876,15 +899,9 @@ jobs:
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-arm64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-linux-arm64
sudo install gopogh-linux-arm64 /usr/local/bin/gopogh
- name: Install tools
shell: bash
run: |
sudo apt update
sudo apt install -y jq docker
- name: Docker Info
shell: bash
run: |

View File

@ -1,5 +1,6 @@
name: PR
on:
workflow_dispatch:
pull_request:
paths:
- "go.mod"
@ -11,7 +12,7 @@ on:
- "!deploy/iso/**"
env:
GOPROXY: https://proxy.golang.org
GO_VERSION: 1.16.1
GO_VERSION: '1.16.6'
jobs:
# Runs before all other jobs
# builds the minikube binaries
@ -119,7 +120,7 @@ jobs:
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -137,6 +138,7 @@ jobs:
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
MINIKUBE_HOME=$(pwd)/testhome ./minikube-linux-amd64 delete --all --purge
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.run TestFunctional -test.timeout=10m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
@ -218,7 +220,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -236,6 +238,7 @@ jobs:
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
MINIKUBE_HOME=$(pwd)/testhome ./minikube-linux-amd64 delete --all --purge
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args="--vm-driver=docker --container-runtime=containerd" -test.run TestFunctional -test.timeout=30m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
@ -320,7 +323,7 @@ jobs:
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -338,6 +341,7 @@ jobs:
chmod a+x minikube-*
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
MINIKUBE_HOME=$(pwd)/testhome ./minikube-linux-amd64 delete --all --purge
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=podman -v=6 --alsologtostderr -test.run TestFunctional -test.timeout=10m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
@ -406,7 +410,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Install docker
shell: bash
@ -437,6 +441,7 @@ jobs:
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
MINIKUBE_HOME=$(pwd)/testhome ./minikube-darwin-amd64 delete --all --purge
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-darwin-amd64 -minikube-start-args=--vm-driver=virtualbox -test.run "TestFunctional" -test.timeout=35m -test.v -timeout-multiplier=1.5 -binary=./minikube-darwin-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
@ -552,7 +557,7 @@ jobs:
continue-on-error: true
shell: powershell
run: |
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
choco install -y kubernetes-cli
choco install -y jq
choco install -y caffeine
@ -690,7 +695,7 @@ jobs:
shell: powershell
run: |
$ErrorActionPreference = "SilentlyContinue"
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
choco install -y kubernetes-cli
choco install -y jq
choco install -y caffeine
@ -795,7 +800,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -811,6 +816,7 @@ jobs:
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
MINIKUBE_HOME=$(pwd)/testhome ./minikube-linux-amd64 delete --all --purge
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=10m -test.v -timeout-multiplier=1.5 -test.run TestFunctional -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
@ -863,6 +869,28 @@ jobs:
GOPOGH_RESULT: ""
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
steps:
- name: Install tools
shell: bash
run: |
sudo apt update
sudo apt install -y jq docker git cron
sudo usermod -aG docker $USER
- name: Init
shell: bash
run: |
if [[ -f /var/run/reboot.in.progress ]]; then
echo "reboot in progress"
exit 1
fi
sudo touch /var/run/job.in.progress
rm -rf cleanup.sh install_cleanup.sh
# after this PR is merged, update URLs to get the scripts from github master
wget https://storage.googleapis.com/minikube-ci-utils/cleanup.sh
wget https://storage.googleapis.com/minikube-ci-utils/install_cleanup.sh
chmod +x cleanup.sh install_cleanup.sh
./install_cleanup.sh
- name: Install kubectl
shell: bash
run: |
@ -873,15 +901,9 @@ jobs:
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-arm64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-linux-arm64
sudo install gopogh-linux-arm64 /usr/local/bin/gopogh
- name: Install tools
shell: bash
run: |
sudo apt update
sudo apt install -y jq docker
- name: Docker Info
shell: bash
run: |
@ -928,6 +950,7 @@ jobs:
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
MINIKUBE_HOME=$(pwd)/testhome ./minikube-linux-arm64 delete --all --purge
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-arm64 -minikube-start-args=--vm-driver=docker -test.run TestFunctional -test.timeout=25m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-arm64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
@ -973,6 +996,10 @@ jobs:
if [ "$numPass" -eq 0 ];then echo "*** 0 Passed! ***";exit 2;fi
if [ "$numPass" -lt 0 ];then echo "*** Failed to pass at least 20! ***";exit 2;fi
if [ "$numPass" -eq 0 ];then echo "*** Passed! ***";exit 0;fi
- name: finalize
shell: bash
run: sudo rm -rf /var/run/job.in.progress
# After all integration tests finished
# collect all the reports and upload them
upload_all_reports:

View File

@ -1,5 +1,6 @@
name: PR_Verified
on:
workflow_dispatch:
pull_request:
paths:
- "go.mod"
@ -20,7 +21,7 @@ on:
- deleted
env:
GOPROXY: https://proxy.golang.org
GO_VERSION: 1.16.1
GO_VERSION: '1.16.6'
jobs:
# Runs before all other jobs
@ -40,7 +41,7 @@ jobs:
run: |
sudo apt-get update
sudo apt-get install -y libvirt-dev
make cross e2e-cross debs
MINIKUBE_BUILD_IN_DOCKER=y make cross e2e-cross debs
cp -r test/integration/testdata ./out
whoami
echo github ref $GITHUB_REF
@ -66,7 +67,7 @@ jobs:
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Install Go
@ -154,7 +155,7 @@ jobs:
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-arm64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-linux-arm64
sudo install gopogh-linux-arm64 /usr/local/bin/gopogh
- name: Install Go
@ -267,7 +268,7 @@ jobs:
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -349,7 +350,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Install docker
shell: bash
@ -461,7 +462,7 @@ jobs:
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -545,7 +546,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -652,7 +653,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -734,7 +735,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -873,7 +874,7 @@ jobs:
continue-on-error: true
shell: powershell
run: |
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
choco install -y kubernetes-cli
choco install -y jq
choco install -y caffeine
@ -967,7 +968,7 @@ jobs:
shell: powershell
run: |
$ErrorActionPreference = "SilentlyContinue"
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
choco install -y kubernetes-cli
choco install -y jq
choco install -y caffeine

View File

@ -0,0 +1,30 @@
# Runs the time-to-k8s benchmark twice a day for the docker and containerd
# runtimes and uploads results for the public chart (GCP credentials below).
name: "time-to-k8s Public Chart"
on:
  workflow_dispatch:
  schedule:
    # every day at 7am & 7pm pacific
    - cron: "0 2,14 * * *"
env:
  GOPROXY: https://proxy.golang.org
  # Quoted so YAML does not parse the version as a float.
  GO_VERSION: '1.16.6'
jobs:
  time-to-k8s-public-chart:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: ${{env.GO_VERSION}}
          stable: true
      - name: Set up Cloud SDK
        # NOTE(review): pinned to @master rather than a release tag —
        # consider pinning to a versioned ref for reproducibility.
        uses: google-github-actions/setup-gcloud@master
        with:
          project_id: ${{ secrets.GCP_PROJECT_ID }}
          service_account_key: ${{ secrets.GCP_TIME_TO_K8S_SA_KEY }}
          export_default_credentials: true
      - name: Benchmark time-to-k8s for Docker
        run: |
          ./hack/benchmark/time-to-k8s/public-chart/public-chart.sh docker
      - name: Benchmark time-to-k8s for Containerd
        run: |
          ./hack/benchmark/time-to-k8s/public-chart/public-chart.sh containerd

36
.github/workflows/time-to-k8s.yml vendored Normal file
View File

@ -0,0 +1,36 @@
# Runs the time-to-k8s benchmark on each release and opens a PR with the
# results via the minikube-bot fork.
name: "time-to-k8s benchmark"
on:
  workflow_dispatch:
  release:
    types: [released]
env:
  GOPROXY: https://proxy.golang.org
  # Quoted so YAML does not parse the version as a float.
  GO_VERSION: '1.16.6'
jobs:
  benchmark:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v2
      - name: Checkout submodules
        # time-to-k8s lives in a git submodule (see .gitmodules).
        run: git submodule update --init
      - uses: actions/setup-go@v2
        with:
          go-version: ${{env.GO_VERSION}}
          stable: true
      - name: Benchmark
        # BUGFIX: this step previously had no `id:`, yet the Create PR step
        # read `steps.gendocs.outputs.version` — which therefore was always
        # empty. The id below and the matching references fix that.
        id: benchmark
        run: |
          ./hack/benchmark/time-to-k8s/time-to-k8s.sh
          # NOTE(review): assumes `minikube` is on PATH after the benchmark
          # script runs — confirm, otherwise use the freshly built binary.
          echo "::set-output name=version::$(minikube version --short)"
      - name: Create PR
        uses: peter-evans/create-pull-request@v3
        with:
          token: ${{ secrets.MINIKUBE_BOT_PAT }}
          commit-message: add time-to-k8s benchmark for ${{ steps.benchmark.outputs.version }}
          committer: minikube-bot <minikube-bot@google.com>
          author: minikube-bot <minikube-bot@google.com>
          branch: addTimeToK8s${{ steps.benchmark.outputs.version }}
          push-to-fork: minikube-bot/minikube
          base: master
          delete-branch: true
          title: Add time-to-k8s benchmark for ${{ steps.benchmark.outputs.version }}
          body: Updating time-to-k8s benchmark as part of the release process

29
.github/workflows/translations.yml vendored Normal file
View File

@ -0,0 +1,29 @@
# Runs the unit-test suite whenever a PR touches translation files, to catch
# broken or out-of-sync translations before merge.
name: Translations Validation
on:
  workflow_dispatch:
  pull_request:
    paths:
      - "translations/**"
env:
  GOPROXY: https://proxy.golang.org
  # Quoted so YAML does not parse the version as a float.
  GO_VERSION: '1.16.6'
jobs:
  unit_test:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: ${{env.GO_VERSION}}
          stable: true
      - name: Install libvirt
        # Needed to compile the KVM driver during the test build.
        run: |
          sudo apt-get update
          sudo apt-get install -y libvirt-dev
      - name: Download Dependencies
        run: go mod download
      - name: Unit Test
        env:
          TESTSUITE: unittest
        run: make test
        continue-on-error: false

19
.github/workflows/twitter-bot.yml vendored Normal file
View File

@ -0,0 +1,19 @@
# Announces new releases on Twitter: fires on pushed v* tags and on
# published releases.
name: "Tweet the release"
on:
  workflow_dispatch:
  push:
    tags:
      # Quoted: a leading '*' in a plain scalar would be read as an alias.
      - 'v*'
  release:
    types: [published]
jobs:
  twitter-release:
    runs-on: ubuntu-latest
    steps:
      - uses: ethomson/send-tweet-action@v1
        with:
          status: "A new minikube version just released ! check it out https://github.com/kubernetes/minikube/blob/master/CHANGELOG.md"
          consumer-key: ${{ secrets.TWITTER_API_KEY }}
          consumer-secret: ${{ secrets.TWITTER_API_SECRET }}
          access-token: ${{ secrets.TWITTER_ACCESS_TOKEN }}
          access-token-secret: ${{ secrets.TWITTER_ACCESS_TOKEN_SECRET }}

View File

@ -0,0 +1,45 @@
# Weekly job that bumps minikube's Go version to match Kubernetes and opens
# a PR via the minikube-bot fork when anything changed.
name: "update-golang-versions"
on:
  workflow_dispatch:
  schedule:
    # every Monday at around 2 am pacific/9 am UTC
    - cron: "0 9 * * 1"
env:
  GOPROXY: https://proxy.golang.org
  # Quoted so YAML does not parse the version as a float.
  GO_VERSION: '1.16.6'
jobs:
  # FIX: job id was `bump-k8s-versions`, copy-pasted from the Kubernetes
  # bump workflow; renamed to reflect what this job actually does.
  bump-golang-versions:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: ${{env.GO_VERSION}}
          stable: true
      - name: Bump Golang Versions
        id: bumpGolang
        run: |
          make update-golang-version
          # Publish the dirty-tree status so the next step can decide
          # whether a PR is needed at all.
          echo "::set-output name=changes::$(git status --porcelain)"
      - name: Create PR
        # Skip entirely when nothing changed.
        if: ${{ steps.bumpGolang.outputs.changes != '' }}
        uses: peter-evans/create-pull-request@v3
        with:
          token: ${{ secrets.MINIKUBE_BOT_PAT }}
          commit-message: bump golang versions
          committer: minikube-bot <minikube-bot@google.com>
          author: minikube-bot <minikube-bot@google.com>
          branch: auto_bump_golang_version
          push-to-fork: minikube-bot/minikube
          base: master
          delete-branch: true
          title: 'bump golang version'
          labels: ok-to-test
          body: |
            Kubernetes Project just updated the [golang version](https://github.com/kubernetes/kubernetes/blob/master/build/build-image/cross/VERSION), updating minikube golang to match Kubernetes.
            This PR was auto-generated by `make update-golang-version` using [update-golang-versions.yml](https://github.com/kubernetes/minikube/tree/master/.github/workflows/update-golang-version.yml) CI Workflow.

View File

@ -0,0 +1,43 @@
# Weekly job that bumps the default/newest Kubernetes versions and opens a
# PR via the minikube-bot fork when anything changed.
name: "update-kubernetes-versions"
on:
  workflow_dispatch:
  schedule:
    # every Monday at around 1 am pacific/8 am UTC
    - cron: "0 8 * * 1"
env:
  GOPROXY: https://proxy.golang.org
  # Quoted so YAML does not parse the version as a float.
  GO_VERSION: '1.16.6'
jobs:
  bump-k8s-versions:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: ${{env.GO_VERSION}}
          stable: true
      # FIX: step name typo "Kuberenetes" corrected.
      - name: Bump Kubernetes Versions
        id: bumpk8s
        run: |
          make update-kubernetes-version
          # Publish the dirty-tree status so the next step can decide
          # whether a PR is needed at all.
          echo "::set-output name=changes::$(git status --porcelain)"
      - name: Create PR
        # Skip entirely when nothing changed.
        if: ${{ steps.bumpk8s.outputs.changes != '' }}
        uses: peter-evans/create-pull-request@v3
        with:
          token: ${{ secrets.MINIKUBE_BOT_PAT }}
          commit-message: bump default/newest kubernetes versions
          committer: minikube-bot <minikube-bot@google.com>
          author: minikube-bot <minikube-bot@google.com>
          branch: auto_bump_k8s_versions
          push-to-fork: minikube-bot/minikube
          base: master
          delete-branch: true
          title: 'bump default/newest kubernetes versions'
          labels: ok-to-test
          body: |
            This PR was auto-generated by `make update-kubernetes-version` using [update-k8s-versions.yml](https://github.com/kubernetes/minikube/tree/master/.github/workflows) CI Workflow.
            Please only merge if all the tests pass.
            ${{ steps.bumpk8s.outputs.changes }}

4
.gitignore vendored
View File

@ -35,10 +35,6 @@ _testmain.go
#iso version file
deploy/iso/minikube-iso/board/coreos/minikube/rootfs-overlay/etc/VERSION
/pkg/minikube/assets/assets.go-e
/pkg/minikube/assets/assets.go
/pkg/minikube/translate/translations.go
/pkg/minikube/translate/translations.go-e
/minikube
.DS_Store

3
.gitmodules vendored
View File

@ -1,3 +1,6 @@
[submodule "site/themes/docsy"]
path = site/themes/docsy
url = https://github.com/google/docsy.git
[submodule "hack/benchmark/time-to-k8s/time-to-k8s-repo"]
path = hack/benchmark/time-to-k8s/time-to-k8s-repo
url = https://github.com/tstromberg/time-to-k8s.git

View File

@ -1,5 +1,219 @@
# Release Notes
## Version 1.22.0 - 2021-07-07
Features:
* `minikube version`: add `--components` flag to list all included software [#11843](https://github.com/kubernetes/minikube/pull/11843)
Minor Improvements:
* auto-pause: add support for other container runtimes [#11834](https://github.com/kubernetes/minikube/pull/11834)
* windows: support renaming binary to `kubectl.exe` and running as kubectl [#11819](https://github.com/kubernetes/minikube/pull/11819)
Bugs:
* Fix "kubelet Default-Start contains no runlevels" error [#11815](https://github.com/kubernetes/minikube/pull/11815)
Version Upgrades:
* bump default kubernetes version to v1.21.2 & newest kubernetes version to v1.22.0-beta.0 [#11901](https://github.com/kubernetes/minikube/pull/11901)
For a more detailed changelog, including changes occurring in pre-release versions, see [CHANGELOG.md](https://github.com/kubernetes/minikube/blob/master/CHANGELOG.md).
Thank you to our contributors for this release!
- Anders F Björklund
- Andriy Dzikh
- Dakshraj Sharma
- Ilya Zuyev
- Jeff MAURY
- Maxime Kjaer
- Medya Ghazizadeh
- Rajwinder Mahal
- Sharif Elgamal
- Steven Powell
Thank you to our PR reviewers for this release!
- medyagh (27 comments)
- sharifelgamal (10 comments)
- andriyDev (5 comments)
- spowelljr (4 comments)
- ilya-zuyev (3 comments)
Thank you to our triage members for this release!
- medyagh (16 comments)
- spowelljr (7 comments)
- afbjorklund (4 comments)
- mahalrs (4 comments)
- sharifelgamal (3 comments)
## Version 1.22.0-beta.0 - 2021-06-28
Features:
* auto-pause addon: add support for arm64 [#11743](https://github.com/kubernetes/minikube/pull/11743)
* `addon list`: add info on each addon's maintainer [#11753](https://github.com/kubernetes/minikube/pull/11753)
* add ability to pass max to `--cpu` and `--memory` flags [#11692](https://github.com/kubernetes/minikube/pull/11692)
Bugs:
* Fix `--base-image` caching for images specified by name:tag [#11603](https://github.com/kubernetes/minikube/pull/11603)
* Fix embed-certs global config [#11576](https://github.com/kubernetes/minikube/pull/11576)
* Fix a download link to use arm64 instead of amd64 [#11653](https://github.com/kubernetes/minikube/pull/11653)
* fix downloading duplicate base image [#11690](https://github.com/kubernetes/minikube/pull/11690)
* fix multi-node losing track of nodes after second restart [#11731](https://github.com/kubernetes/minikube/pull/11731)
* gcp-auth: do not override existing environment variables in pods [#11665](https://github.com/kubernetes/minikube/pull/11665)
Minor improvements:
* Allow running amd64 binary on M1 [#11674](https://github.com/kubernetes/minikube/pull/11674)
* improve containerd experience on cgroup v2 [#11632](https://github.com/kubernetes/minikube/pull/11632)
* Improve French locale [#11728](https://github.com/kubernetes/minikube/pull/11728)
* Fix UI error for stopping systemd service [#11667](https://github.com/kubernetes/minikube/pull/11667)
* international languages: allow using LC_ALL env to set local language for windows [#11721](https://github.com/kubernetes/minikube/pull/11721)
* Change registery_mirror to registery-mirror [#11678](https://github.com/kubernetes/minikube/pull/11678)
Version Upgrades:
* ISO: Upgrade podman to 3.1.2 [#11704](https://github.com/kubernetes/minikube/pull/11704)
* Upgrade Buildroot to 2021.02 LTS with Linux 4.19 [#11688](https://github.com/kubernetes/minikube/pull/11688)
For a more detailed changelog, including changes occurring in pre-release versions, see [CHANGELOG.md](https://github.com/kubernetes/minikube/blob/master/CHANGELOG.md).
Thank you to our contributors for this release!
- Anders F Björklund
- Andriy Dzikh
- Daehyeok Mun
- Dongjoon Hyun
- Felipe Crescencio de Oliveira
- Ilya Zuyev
- JacekDuszenko
- Jeff MAURY
- Medya Ghazizadeh
- Peixuan Ding
- RA489
- Sharif Elgamal
- Steven Powell
- Vishal Jain
- zhangdb-git
Thank you to our PR reviewers for this release!
- medyagh (63 comments)
- sharifelgamal (9 comments)
- ilya-zuyev (6 comments)
- andriyDev (3 comments)
- spowelljr (3 comments)
- afbjorklund (1 comments)
- prezha (1 comments)
- tharun208 (1 comments)
Thank you to our triage members for this release!
## Version 1.21.0 - 2021-06-10
* add more polish translations [#11587](https://github.com/kubernetes/minikube/pull/11587)
* Modify MetricsServer to use v1 api version (instead of v1beta1). [#11584](https://github.com/kubernetes/minikube/pull/11584)
For a more detailed changelog, including changes occurring in pre-release versions, see [CHANGELOG.md](https://github.com/kubernetes/minikube/blob/master/CHANGELOG.md).
Thank you to our contributors for this release!
- Andriy Dzikh
- Ilya Zuyev
- JacekDuszenko
- Medya Ghazizadeh
- Sharif Elgamal
- Steven Powell
Thank you to our PR reviewers for this release!
- spowelljr (11 comments)
- medyagh (2 comments)
- sharifelgamal (2 comments)
- andriyDev (1 comments)
Thank you to our triage members for this release!
- RA489 (12 comments)
- andriyDev (10 comments)
- sharifelgamal (10 comments)
- JacekDuszenko (7 comments)
- spowelljr (5 comments)
Check out our [contributions leaderboard](https://minikube.sigs.k8s.io/docs/contrib/leaderboard/v1.21.0/) for this release!
## Version 1.21.0-beta.0 - 2021-06-02
Features:
* Support setting addons from environmental variables [#11469](https://github.com/kubernetes/minikube/pull/11469)
* Add "resume" as an alias for "unpause" [#11431](https://github.com/kubernetes/minikube/pull/11431)
* Implement target node option for `cp` command [#11304](https://github.com/kubernetes/minikube/pull/11304)
Bugs:
* Fix delete command for paused kic driver with containerd/crio runtime [#11504](https://github.com/kubernetes/minikube/pull/11504)
* kicbase: try image without sha before failing [#11559](https://github.com/kubernetes/minikube/pull/11559)
* bug: return error on invalid function name in extract.TranslatableStrings [#11454](https://github.com/kubernetes/minikube/pull/11454)
* Prevent downloading duplicate binaries already present in preload [#11461](https://github.com/kubernetes/minikube/pull/11461)
* gcp-auth addon: do not reapply gcp-auth yamls on minikube restart [#11486](https://github.com/kubernetes/minikube/pull/11486)
* Disable Non-Active Containers Runtimes [#11516](https://github.com/kubernetes/minikube/pull/11516)
* Persist custom addon image/registry settings. [#11432](https://github.com/kubernetes/minikube/pull/11432)
* Fix auto-pause on VMs (detect right control-plane IP) [#11438](https://github.com/kubernetes/minikube/pull/11438)
Version Upgrades:
* bump default k8s version to v1.20.7 and newest to v1.22.0-alpha.2 [#11525](https://github.com/kubernetes/minikube/pull/11525)
* containerd: upgrade `io.containerd.runtime.v1.linux` to `io.containerd.runc.v2` (support cgroup v2) [#11325](https://github.com/kubernetes/minikube/pull/11325)
* metallb-addon: Update metallb from 0.8.2 to 0.9.6 [#11410](https://github.com/kubernetes/minikube/pull/11410)
For a more detailed changelog, including changes occuring in pre-release versions, see [CHANGELOG.md](https://github.com/kubernetes/minikube/blob/master/CHANGELOG.md).
Thank you to our contributors for this release!
- Akihiro Suda
- Alessandro Lenzen
- Anders F Björklund
- Andriy Dzikh
- Brian de Alwis
- Claudia J. Kang
- Daehyeok Mun
- Emma
- Evan Anderson
- Evan Baker
- Garen Torikian
- Ilya Zuyev
- Jasmine Hegman
- Kent Iso
- KushagraIndurkhya
- Li Zhijian
- Medya Ghazizadeh
- Peixuan Ding
- Predrag Rogic
- Sharif Elgamal
- Steven Powell
- TAKAHASHI Shuuji
- Thomas Güttler
- Tomasz Janiszewski
- Utkarsh Srivastava
- VigoTheHacker
- hex0punk
Thank you to our PR reviewers for this release!
- medyagh (129 comments)
- ilya-zuyev (20 comments)
- afbjorklund (10 comments)
- spowelljr (9 comments)
- sharifelgamal (5 comments)
- AkihiroSuda (1 comments)
- andriyDev (1 comments)
Thank you to our triage members for this release!
- afbjorklund (34 comments)
- medyagh (32 comments)
- andriyDev (14 comments)
- dinever (13 comments)
- ilya-zuyev (11 comments)
## Version 1.20.0 - 2021-05-06
Feature:

292
Makefile
View File

@ -14,7 +14,7 @@
# Bump these on release - and please check ISO_VERSION for correctness.
VERSION_MAJOR ?= 1
VERSION_MINOR ?= 20
VERSION_MINOR ?= 22
VERSION_BUILD ?= 0
RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)
VERSION ?= v$(RAW_VERSION)
@ -23,7 +23,7 @@ KUBERNETES_VERSION ?= $(shell egrep "DefaultKubernetesVersion =" pkg/minikube/co
KIC_VERSION ?= $(shell egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f2)
# Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions
ISO_VERSION ?= v1.20.0
ISO_VERSION ?= v1.22.0-1627488369-11483
# Dashes are valid in semver, but not Linux packaging. Use ~ to delimit alpha/beta
DEB_VERSION ?= $(subst -,~,$(RAW_VERSION))
DEB_REVISION ?= 0
@ -32,7 +32,7 @@ RPM_VERSION ?= $(DEB_VERSION)
RPM_REVISION ?= 0
# used by hack/jenkins/release_build_and_upload.sh and KVM_BUILD_IMAGE, see also BUILD_IMAGE below
GO_VERSION ?= 1.16.1
GO_VERSION ?= 1.16.6
# replace "x.y.0" => "x.y". kube-cross and golang.org/dl use different formats for x.y.0 go versions
KVM_GO_VERSION ?= $(GO_VERSION:.0=)
@ -40,7 +40,7 @@ KVM_GO_VERSION ?= $(GO_VERSION:.0=)
INSTALL_SIZE ?= $(shell du out/minikube-windows-amd64.exe | cut -f1)
BUILDROOT_BRANCH ?= 2020.02.12
REGISTRY?=gcr.io/k8s-minikube
REGISTRY ?= gcr.io/k8s-minikube
# Get git commit id
COMMIT_NO := $(shell git rev-parse HEAD 2> /dev/null || true)
@ -54,7 +54,9 @@ HYPERKIT_BUILD_IMAGE ?= neilotoole/xcgo:go1.15
BUILD_IMAGE ?= us.gcr.io/k8s-artifacts-prod/build-image/kube-cross:v$(GO_VERSION)-1
ISO_BUILD_IMAGE ?= $(REGISTRY)/buildroot-image
KVM_BUILD_IMAGE ?= $(REGISTRY)/kvm-build-image:$(KVM_GO_VERSION)
KVM_BUILD_IMAGE_AMD64 ?= $(REGISTRY)/kvm-build-image_amd64:$(KVM_GO_VERSION)
KVM_BUILD_IMAGE_ARM64 ?= $(REGISTRY)/kvm-build-image_arm64:$(KVM_GO_VERSION)
ISO_BUCKET ?= minikube/iso
@ -74,8 +76,7 @@ GOLINT_GOGC ?= 100
GOLINT_OPTIONS = --timeout 7m \
--build-tags "${MINIKUBE_INTEGRATION_BUILD_TAGS}" \
--enable gofmt,goimports,gocritic,golint,gocyclo,misspell,nakedret,stylecheck,unconvert,unparam,dogsled \
--exclude 'variable on range scope.*in function literal|ifElseChain' \
--skip-files "pkg/minikube/translate/translations.go|pkg/minikube/assets/assets.go"
--exclude 'variable on range scope.*in function literal|ifElseChain'
export GO111MODULE := on
@ -130,13 +131,15 @@ MINIKUBE_MARKDOWN_FILES := README.md CONTRIBUTING.md CHANGELOG.md
MINIKUBE_BUILD_TAGS :=
MINIKUBE_INTEGRATION_BUILD_TAGS := integration $(MINIKUBE_BUILD_TAGS)
CMD_SOURCE_DIRS = cmd pkg
CMD_SOURCE_DIRS = cmd pkg deploy/addons translations
SOURCE_DIRS = $(CMD_SOURCE_DIRS) test
SOURCE_PACKAGES = ./cmd/... ./pkg/... ./test/...
SOURCE_PACKAGES = ./cmd/... ./pkg/... ./deploy/addons/... ./translations/... ./test/...
SOURCE_GENERATED = pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go
SOURCE_FILES = $(shell find $(CMD_SOURCE_DIRS) -type f -name "*.go" | grep -v _test.go)
GOTEST_FILES = $(shell find $(CMD_SOURCE_DIRS) -type f -name "*.go" | grep _test.go)
ADDON_FILES = $(shell find "deploy/addons" -type f | grep -v "\.go")
TRANSLATION_FILES = $(shell find "translations" -type f | grep -v "\.go")
ASSET_FILES = $(ADDON_FILES) $(TRANSLATION_FILES)
# kvm2 ldflags
KVM2_LDFLAGS := -X k8s.io/minikube/pkg/drivers/kvm.version=$(VERSION) -X k8s.io/minikube/pkg/drivers/kvm.gitCommitID=$(COMMIT)
@ -195,7 +198,7 @@ ifneq ($(TEST_FILES),)
INTEGRATION_TESTS_TO_RUN := $(addprefix ./test/integration/, $(TEST_HELPERS) $(TEST_FILES))
endif
out/minikube$(IS_EXE): $(SOURCE_GENERATED) $(SOURCE_FILES) go.mod
out/minikube$(IS_EXE): $(SOURCE_FILES) $(ASSET_FILES) go.mod
ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
$(call DOCKER,$(BUILD_IMAGE),GOOS=$(GOOS) GOARCH=$(GOARCH) GOARM=$(GOARM) /usr/bin/make $@)
else
@ -207,6 +210,10 @@ out/minikube-windows-amd64.exe: out/minikube-windows-amd64
$(if $(quiet),@echo " CP $@")
$(Q)cp $< $@
out/minikube-linux-i686: out/minikube-linux-386
$(if $(quiet),@echo " CP $@")
$(Q)cp $< $@
out/minikube-linux-x86_64: out/minikube-linux-amd64
$(if $(quiet),@echo " CP $@")
$(Q)cp $< $@
@ -240,7 +247,7 @@ minikube-windows-amd64.exe: out/minikube-windows-amd64.exe ## Build Minikube for
eq = $(and $(findstring x$(1),x$(2)),$(findstring x$(2),x$(1)))
out/minikube-%: $(SOURCE_GENERATED) $(SOURCE_FILES)
out/minikube-%: $(SOURCE_FILES) $(ASSET_FILES)
ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
$(call DOCKER,$(BUILD_IMAGE),/usr/bin/make $@)
else
@ -249,6 +256,10 @@ else
go build -tags "$(MINIKUBE_BUILD_TAGS)" -ldflags="$(MINIKUBE_LDFLAGS)" -a -o $@ k8s.io/minikube/cmd/minikube
endif
out/minikube-linux-armv6: $(SOURCE_FILES) $(ASSET_FILES)
$(Q)GOOS=linux GOARCH=arm GOARM=6 \
go build -tags "$(MINIKUBE_BUILD_TAGS)" -ldflags="$(MINIKUBE_LDFLAGS)" -a -o $@ k8s.io/minikube/cmd/minikube
.PHONY: e2e-linux-amd64 e2e-linux-arm64 e2e-darwin-amd64 e2e-windows-amd64.exe
e2e-linux-amd64: out/e2e-linux-amd64 ## build end2end binary for Linux x86 64bit
e2e-linux-arm64: out/e2e-linux-arm64 ## build end2end binary for Linux ARM 64bit
@ -302,18 +313,23 @@ iso_in_docker:
--user $(shell id -u):$(shell id -g) --env HOME=/tmp --env IN_DOCKER=1 \
$(ISO_BUILD_IMAGE) /bin/bash
test-iso: $(SOURCE_GENERATED)
test-iso:
go test -v $(INTEGRATION_TESTS_TO_RUN) --tags=iso --minikube-start-args="--iso-url=file://$(shell pwd)/out/buildroot/output/images/rootfs.iso9660"
.PHONY: test-pkg
test-pkg/%: $(SOURCE_GENERATED) ## Trigger packaging test
test-pkg/%: ## Trigger packaging test
go test -v -test.timeout=60m ./$* --tags="$(MINIKUBE_BUILD_TAGS)"
.PHONY: all
all: cross drivers e2e-cross cross-tars exotic out/gvisor-addon ## Build all different minikube components
all: cross drivers e2e-cross cross-tars exotic retro out/gvisor-addon ## Build all different minikube components
.PHONY: drivers
drivers: docker-machine-driver-hyperkit docker-machine-driver-kvm2 ## Build Hyperkit and KVM2 drivers
drivers: ## Build Hyperkit and KVM2 drivers
drivers: docker-machine-driver-hyperkit \
docker-machine-driver-kvm2 \
out/docker-machine-driver-kvm2-amd64 \
out/docker-machine-driver-kvm2-arm64
.PHONY: docker-machine-driver-hyperkit
docker-machine-driver-hyperkit: out/docker-machine-driver-hyperkit ## Build Hyperkit driver
@ -356,15 +372,15 @@ else
endif
.PHONY: test
test: $(SOURCE_GENERATED) ## Trigger minikube test
test: ## Trigger minikube test
MINIKUBE_LDFLAGS="${MINIKUBE_LDFLAGS}" ./test.sh
.PHONY: generate-docs
generate-docs: out/minikube ## Automatically generate commands documentation.
out/minikube generate-docs --path ./site/content/en/docs/commands/ --test-path ./site/content/en/docs/contrib/tests.en.md
generate-docs: extract out/minikube ## Automatically generate commands documentation.
out/minikube generate-docs --path ./site/content/en/docs/commands/ --test-path ./site/content/en/docs/contrib/tests.en.md --code-path ./site/content/en/docs/contrib/errorcodes.en.md
.PHONY: gotest
gotest: $(SOURCE_GENERATED) ## Trigger minikube test
gotest: ## Trigger minikube test
$(if $(quiet),@echo " TEST $@")
$(Q)go test -tags "$(MINIKUBE_BUILD_TAGS)" -ldflags="$(MINIKUBE_LDFLAGS)" $(MINIKUBE_TEST_FILES)
@ -385,43 +401,19 @@ out/coverage.html: out/coverage.out
$(if $(quiet),@echo " COVER $@")
$(Q)go tool cover -html=$< -o $@
.PHONY: extract
extract: ## Compile extract tool
.PHONY: extract
extract: ## extract internationalization words for translations
go run cmd/extract/extract.go
# Regenerates assets.go when template files have been updated
pkg/minikube/assets/assets.go: $(shell find "deploy/addons" -type f)
ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
$(call DOCKER,$(BUILD_IMAGE),/usr/bin/make $@)
endif
@which go-bindata >/dev/null 2>&1 || GO111MODULE=off GOBIN="$(GOPATH)$(DIRSEP)bin" go get github.com/go-bindata/go-bindata/...
$(if $(quiet),@echo " GEN $@")
$(Q)PATH="$(PATH)$(PATHSEP)$(GOPATH)$(DIRSEP)bin" go-bindata -nomemcopy -o $@ -pkg assets deploy/addons/...
$(Q)-gofmt -s -w $@
@#golint: Dns should be DNS (compat sed)
@sed -i -e 's/Dns/DNS/g' $@ && rm -f ./-e
@#golint: Html should be HTML (compat sed)
@sed -i -e 's/Html/HTML/g' $@ && rm -f ./-e
@#golint: don't use underscores in Go names
@sed -i -e 's/SnapshotStorageK8sIo_volumesnapshot/SnapshotStorageK8sIoVolumesnapshot/g' $@ && rm -f ./-e
pkg/minikube/translate/translations.go: $(shell find "translations/" -type f)
ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
$(call DOCKER,$(BUILD_IMAGE),/usr/bin/make $@)
endif
@which go-bindata >/dev/null 2>&1 || GO111MODULE=off GOBIN="$(GOPATH)$(DIRSEP)bin" go get github.com/go-bindata/go-bindata/...
$(if $(quiet),@echo " GEN $@")
$(Q)PATH="$(PATH)$(PATHSEP)$(GOPATH)$(DIRSEP)bin" go-bindata -nomemcopy -o $@ -pkg translate translations/...
$(Q)-gofmt -s -w $@
@#golint: Json should be JSON (compat sed)
@sed -i -e 's/Json/JSON/' $@ && rm -f ./-e
.PHONY: cross
cross: minikube-linux-amd64 minikube-darwin-amd64 minikube-windows-amd64.exe ## Build minikube for all platform
.PHONY: exotic
exotic: out/minikube-linux-arm out/minikube-linux-arm64 out/minikube-linux-ppc64le out/minikube-linux-s390x ## Build minikube for non-amd64 linux
.PHONY: retro
retro: out/minikube-linux-386 out/minikube-linux-armv6 ## Build minikube for legacy 32-bit linux
.PHONY: windows
windows: minikube-windows-amd64.exe ## Build minikube for Windows 64bit
@ -439,7 +431,8 @@ checksum: ## Generate checksums
for f in out/minikube.iso out/minikube-linux-amd64 out/minikube-linux-arm \
out/minikube-linux-arm64 out/minikube-linux-ppc64le out/minikube-linux-s390x \
out/minikube-darwin-amd64 out/minikube-windows-amd64.exe \
out/docker-machine-driver-kvm2 out/docker-machine-driver-hyperkit; do \
out/docker-machine-driver-kvm2 out/docker-machine-driver-kvm2-amd64 out/docker-machine-driver-kvm2-arm64 \
out/docker-machine-driver-hyperkit; do \
if [ -f "$${f}" ]; then \
openssl sha256 "$${f}" | awk '{print $$2}' > "$${f}.sha256" ; \
fi ; \
@ -479,7 +472,7 @@ goimports: ## Run goimports and list the files differs from goimport's
@test -z "`goimports -l $(SOURCE_DIRS)`"
.PHONY: golint
golint: $(SOURCE_GENERATED) ## Run golint
golint: ## Run golint
@golint -set_exit_status $(SOURCE_PACKAGES)
.PHONY: gocyclo
@ -494,17 +487,17 @@ out/linters/golangci-lint-$(GOLINT_VERSION):
# this one is meant for local use
.PHONY: lint
ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
lint: $(SOURCE_GENERATED)
lint:
docker run --rm -v $(pwd):/app -w /app golangci/golangci-lint:$(GOLINT_VERSION) \
golangci-lint run ${GOLINT_OPTIONS} --skip-dirs "cmd/drivers/kvm|cmd/drivers/hyperkit|pkg/drivers/kvm|pkg/drivers/hyperkit" ./...
else
lint: $(SOURCE_GENERATED) out/linters/golangci-lint-$(GOLINT_VERSION) ## Run lint
lint: out/linters/golangci-lint-$(GOLINT_VERSION) ## Run lint
./out/linters/golangci-lint-$(GOLINT_VERSION) run ${GOLINT_OPTIONS} ./...
endif
# lint-ci is slower version of lint and is meant to be used in ci (travis) to avoid out of memory leaks.
.PHONY: lint-ci
lint-ci: $(SOURCE_GENERATED) out/linters/golangci-lint-$(GOLINT_VERSION) ## Run lint-ci
lint-ci: out/linters/golangci-lint-$(GOLINT_VERSION) ## Run lint-ci
GOGC=${GOLINT_GOGC} ./out/linters/golangci-lint-$(GOLINT_VERSION) run \
--concurrency ${GOLINT_JOBS} ${GOLINT_OPTIONS} ./...
@ -522,15 +515,15 @@ mdlint:
verify-iso: # Make sure the current ISO exists in the expected bucket
gsutil stat gs://$(ISO_BUCKET)/minikube-$(ISO_VERSION).iso
out/docs/minikube.md: $(shell find "cmd") $(shell find "pkg/minikube/constants") $(SOURCE_GENERATED)
out/docs/minikube.md: $(shell find "cmd") $(shell find "pkg/minikube/constants")
go run -ldflags="$(MINIKUBE_LDFLAGS)" -tags gendocs hack/help_text/gen_help_text.go
.PHONY: debs ## Build all deb packages
debs: out/minikube_$(DEB_VERSION)-$(DEB_REVISION)_amd64.deb \
out/minikube_$(DEB_VERSION)-$(DEB_REVISION)_arm64.deb \
out/docker-machine-driver-kvm2_$(DEB_VERSION).deb
out/docker-machine-driver-kvm2_$(DEB_VERSION).deb \
out/docker-machine-driver-kvm2_$(DEB_VERSION)-$(DEB_REVISION)_amd64.deb \
out/docker-machine-driver-kvm2_$(DEB_VERSION)-$(DEB_REVISION)_arm64.deb
.PHONY: deb_version
deb_version:
@ -595,7 +588,9 @@ out/repodata/repomd.xml: out/minikube-$(RPM_VERSION).rpm
.SECONDEXPANSION:
TAR_TARGETS_linux-amd64 := out/minikube-linux-amd64 out/docker-machine-driver-kvm2
TAR_TARGETS_linux-arm64 := out/minikube-linux-arm64 #out/docker-machine-driver-kvm2
TAR_TARGETS_darwin-amd64 := out/minikube-darwin-amd64 out/docker-machine-driver-hyperkit
TAR_TARGETS_darwin-arm64 := out/minikube-darwin-arm64 #out/docker-machine-driver-hyperkit
TAR_TARGETS_windows-amd64 := out/minikube-windows-amd64.exe
out/minikube-%.tar.gz: $$(TAR_TARGETS_$$*)
$(if $(quiet),@echo " TAR $@")
@ -649,8 +644,8 @@ release-hyperkit-driver: install-hyperkit-driver checksum ## Copy hyperkit using
gsutil cp $(GOBIN)/docker-machine-driver-hyperkit.sha256 gs://minikube/drivers/hyperkit/$(VERSION)/
.PHONY: check-release
check-release: $(SOURCE_GENERATED) ## Execute go test
go test -v ./deploy/minikube/release_sanity_test.go -tags=release
check-release: ## Execute go test
go test -timeout 42m -v ./deploy/minikube/release_sanity_test.go
buildroot-image: $(ISO_BUILD_IMAGE) # convenient alias to build the docker container
$(ISO_BUILD_IMAGE): deploy/iso/minikube-iso/Dockerfile
@ -692,8 +687,23 @@ KICBASE_IMAGE_GCR ?= $(REGISTRY)/kicbase:$(KIC_VERSION)
KICBASE_IMAGE_HUB ?= kicbase/stable:$(KIC_VERSION)
KICBASE_IMAGE_REGISTRIES ?= $(KICBASE_IMAGE_GCR) $(KICBASE_IMAGE_HUB)
.PHONY: local-kicbase
local-kicbase: ## Builds the kicbase image and tags it local/kicbase:latest and local/kicbase:$(KIC_VERSION)-$(COMMIT_SHORT)
docker build -f ./deploy/kicbase/Dockerfile -t local/kicbase:$(KIC_VERSION) --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) --cache-from $(KICBASE_IMAGE_GCR) .
docker tag local/kicbase:$(KIC_VERSION) local/kicbase:latest
docker tag local/kicbase:$(KIC_VERSION) local/kicbase:$(KIC_VERSION)-$(COMMIT_SHORT)
SED = sed -i
ifeq ($(GOOS),darwin)
SED = sed -i ''
endif
.PHONY: local-kicbase-debug
local-kicbase-debug: local-kicbase ## Builds a local kicbase image and switches source code to point to it
$(SED) 's|Version = .*|Version = \"$(KIC_VERSION)-$(COMMIT_SHORT)\"|;s|baseImageSHA = .*|baseImageSHA = \"\"|;s|gcrRepo = .*|gcrRepo = \"local/kicbase\"|;s|dockerhubRepo = .*|dockerhubRepo = \"local/kicbase\"|' pkg/drivers/kic/types.go
.PHONY: push-kic-base-image
push-kic-base-image: deploy/kicbase/auto-pause docker-multi-arch-builder ## Push multi-arch local/kicbase:latest to all remote registries
push-kic-base-image: docker-multi-arch-builder ## Push multi-arch local/kicbase:latest to all remote registries
ifdef AUTOPUSH
docker login gcr.io/k8s-minikube
docker login docker.pkg.github.com
@ -704,7 +714,7 @@ endif
ifndef CIBUILD
$(call user_confirm, 'Are you sure you want to push $(KICBASE_IMAGE_REGISTRIES) ?')
endif
env $(X_BUILD_ENV) docker buildx build --builder $(X_DOCKER_BUILDER) --platform $(KICBASE_ARCH) $(addprefix -t ,$(KICBASE_IMAGE_REGISTRIES)) --push --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) ./deploy/kicbase
env $(X_BUILD_ENV) docker buildx build -f ./deploy/kicbase/Dockerfile --builder $(X_DOCKER_BUILDER) --platform $(KICBASE_ARCH) $(addprefix -t ,$(KICBASE_IMAGE_REGISTRIES)) --push --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) .
out/preload-tool:
go build -ldflags="$(MINIKUBE_LDFLAGS)" -o $@ ./hack/preload-images/*.go
@ -723,11 +733,13 @@ TAG = $(STORAGE_PROVISIONER_TAG)
.PHONY: push-storage-provisioner-manifest
push-storage-provisioner-manifest: $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~storage\-provisioner\-image\-&~g") ## Push multi-arch storage-provisioner image
ifndef CIBUILD
docker login gcr.io/k8s-minikube
endif
set -x; for arch in $(ALL_ARCH); do docker push ${IMAGE}-$${arch}:${TAG}; done
docker manifest create --amend $(IMAGE):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(IMAGE)\-&:$(TAG)~g")
set -x; for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${IMAGE}:${TAG} ${IMAGE}-$${arch}:${TAG}; done
docker manifest push $(STORAGE_PROVISIONER_MANIFEST)
$(X_BUILD_ENV) docker manifest create --amend $(IMAGE):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(IMAGE)\-&:$(TAG)~g")
set -x; for arch in $(ALL_ARCH); do $(X_BUILD_ENV) docker manifest annotate --arch $${arch} ${IMAGE}:${TAG} ${IMAGE}-$${arch}:${TAG}; done
$(X_BUILD_ENV) docker manifest push $(STORAGE_PROVISIONER_MANIFEST)
.PHONY: push-docker
push-docker: # Push docker image base on to IMAGE variable (used internally by other targets)
@ -738,7 +750,7 @@ endif
docker push $(IMAGE)
.PHONY: out/gvisor-addon
out/gvisor-addon: $(SOURCE_GENERATED) ## Build gvisor addon
out/gvisor-addon: ## Build gvisor addon
$(if $(quiet),@echo " GO $@")
$(Q)GOOS=linux CGO_ENABLED=0 go build -o $@ cmd/gvisor/gvisor.go
@ -761,6 +773,14 @@ release-minikube: out/minikube checksum ## Minikube release
gsutil cp out/minikube-$(GOOS)-$(GOARCH) $(MINIKUBE_UPLOAD_LOCATION)/$(MINIKUBE_VERSION)/minikube-$(GOOS)-$(GOARCH)
gsutil cp out/minikube-$(GOOS)-$(GOARCH).sha256 $(MINIKUBE_UPLOAD_LOCATION)/$(MINIKUBE_VERSION)/minikube-$(GOOS)-$(GOARCH).sha256
.PHONY: release-notes
release-notes:
hack/release_notes.sh
.PHONY: update-leaderboard
update-leaderboard:
hack/update_contributions.sh
out/docker-machine-driver-kvm2: out/docker-machine-driver-kvm2-amd64
$(if $(quiet),@echo " CP $@")
$(Q)cp $< $@
@ -773,10 +793,81 @@ out/docker-machine-driver-kvm2-aarch64: out/docker-machine-driver-kvm2-arm64
$(if $(quiet),@echo " CP $@")
$(Q)cp $< $@
out/docker-machine-driver-kvm2_$(DEB_VERSION).deb: out/docker-machine-driver-kvm2_$(DEB_VERSION)-0_amd64.deb
cp $< $@
out/docker-machine-driver-kvm2_$(DEB_VERSION)-0_%.deb: out/docker-machine-driver-kvm2-%
cp -r installers/linux/deb/kvm2_deb_template out/docker-machine-driver-kvm2_$(DEB_VERSION)
chmod 0755 out/docker-machine-driver-kvm2_$(DEB_VERSION)/DEBIAN
sed -E -i -e 's/--VERSION--/$(DEB_VERSION)/g' out/docker-machine-driver-kvm2_$(DEB_VERSION)/DEBIAN/control
sed -E -i -e 's/--ARCH--/'$*'/g' out/docker-machine-driver-kvm2_$(DEB_VERSION)/DEBIAN/control
mkdir -p out/docker-machine-driver-kvm2_$(DEB_VERSION)/usr/bin
cp $< out/docker-machine-driver-kvm2_$(DEB_VERSION)/usr/bin/docker-machine-driver-kvm2
fakeroot dpkg-deb --build out/docker-machine-driver-kvm2_$(DEB_VERSION) $@
rm -rf out/docker-machine-driver-kvm2_$(DEB_VERSION)
out/docker-machine-driver-kvm2-$(RPM_VERSION).rpm: out/docker-machine-driver-kvm2-$(RPM_VERSION)-0.x86_64.rpm
cp $< $@
out/docker-machine-driver-kvm2_$(RPM_VERSION).amd64.rpm: out/docker-machine-driver-kvm2-$(RPM_VERSION)-0.x86_64.rpm
cp $< $@
out/docker-machine-driver-kvm2_$(RPM_VERSION).arm64.rpm: out/docker-machine-driver-kvm2-$(RPM_VERSION)-0.aarch64.rpm
cp $< $@
out/docker-machine-driver-kvm2-$(RPM_VERSION)-0.%.rpm: out/docker-machine-driver-kvm2-%
cp -r installers/linux/rpm/kvm2_rpm_template out/docker-machine-driver-kvm2-$(RPM_VERSION)
sed -E -i -e 's/--VERSION--/'$(RPM_VERSION)'/g' out/docker-machine-driver-kvm2-$(RPM_VERSION)/docker-machine-driver-kvm2.spec
sed -E -i -e 's|--OUT--|'$(PWD)/out'|g' out/docker-machine-driver-kvm2-$(RPM_VERSION)/docker-machine-driver-kvm2.spec
rpmbuild -bb -D "_rpmdir $(PWD)/out" --target $* \
out/docker-machine-driver-kvm2-$(RPM_VERSION)/docker-machine-driver-kvm2.spec
@mv out/$*/docker-machine-driver-kvm2-$(RPM_VERSION)-0.$*.rpm out/ && rmdir out/$*
rm -rf out/docker-machine-driver-kvm2-$(RPM_VERSION)
.PHONY: kvm-image-amd64
kvm-image-amd64: installers/linux/kvm/Dockerfile.amd64 ## Convenient alias to build the docker container
docker build --build-arg "GO_VERSION=$(KVM_GO_VERSION)" -t $(KVM_BUILD_IMAGE_AMD64) -f $< $(dir $<)
@echo ""
@echo "$(@) successfully built"
.PHONY: kvm-image-arm64
kvm-image-arm64: installers/linux/kvm/Dockerfile.arm64 ## Convenient alias to build the docker container
docker build --build-arg "GO_VERSION=$(KVM_GO_VERSION)" -t $(KVM_BUILD_IMAGE_ARM64) -f $< $(dir $<)
@echo ""
@echo "$(@) successfully built"
kvm_in_docker:
docker image inspect -f '{{.Id}} {{.RepoTags}}' $(KVM_BUILD_IMAGE_AMD64) || $(MAKE) kvm-image-amd64
rm -f out/docker-machine-driver-kvm2
$(call DOCKER,$(KVM_BUILD_IMAGE_AMD64),/usr/bin/make out/docker-machine-driver-kvm2 COMMIT=$(COMMIT))
.PHONY: install-kvm-driver
install-kvm-driver: out/docker-machine-driver-kvm2 ## Install KVM Driver
mkdir -p $(GOBIN)
cp out/docker-machine-driver-kvm2 $(GOBIN)/docker-machine-driver-kvm2
out/docker-machine-driver-kvm2-arm64:
ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
docker image inspect -f '{{.Id}} {{.RepoTags}}' $(KVM_BUILD_IMAGE_ARM64) || $(MAKE) kvm-image-arm64
$(call DOCKER,$(KVM_BUILD_IMAGE_ARM64),/usr/bin/make $@ COMMIT=$(COMMIT))
else
$(if $(quiet),@echo " GO $@")
$(Q)GOARCH=arm64 \
go build \
-installsuffix "static" \
-ldflags="$(KVM2_LDFLAGS)" \
-tags "libvirt.1.3.1 without_lxc" \
-o $@ \
k8s.io/minikube/cmd/drivers/kvm
endif
chmod +X $@
out/docker-machine-driver-kvm2-%:
ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
docker image inspect -f '{{.Id}} {{.RepoTags}}' $(KVM_BUILD_IMAGE) || $(MAKE) kvm-image
$(call DOCKER,$(KVM_BUILD_IMAGE),/usr/bin/make $@ COMMIT=$(COMMIT))
docker image inspect -f '{{.Id}} {{.RepoTags}}' $(KVM_BUILD_IMAGE_AMD64) || $(MAKE) kvm-image-amd64
$(call DOCKER,$(KVM_BUILD_IMAGE_AMD64),/usr/bin/make $@ COMMIT=$(COMMIT))
# make extra sure that we are linking with the older version of libvirt (1.3.1)
test "`strings $@ | grep '^LIBVIRT_[0-9]' | sort | tail -n 1`" = "LIBVIRT_1.2.9"
else
@ -791,51 +882,6 @@ else
endif
chmod +X $@
out/docker-machine-driver-kvm2_$(DEB_VERSION).deb: out/docker-machine-driver-kvm2_$(DEB_VERSION)-0_amd64.deb
cp $< $@
out/docker-machine-driver-kvm2_$(DEB_VERSION)-0_%.deb: out/docker-machine-driver-kvm2-%
cp -r installers/linux/deb/kvm2_deb_template out/docker-machine-driver-kvm2_$(DEB_VERSION)
chmod 0755 out/docker-machine-driver-kvm2_$(DEB_VERSION)/DEBIAN
sed -E -i 's/--VERSION--/'$(DEB_VERSION)'/g' out/docker-machine-driver-kvm2_$(DEB_VERSION)/DEBIAN/control
sed -E -i 's/--ARCH--/'$*'/g' out/docker-machine-driver-kvm2_$(DEB_VERSION)/DEBIAN/control
mkdir -p out/docker-machine-driver-kvm2_$(DEB_VERSION)/usr/bin
cp $< out/docker-machine-driver-kvm2_$(DEB_VERSION)/usr/bin/docker-machine-driver-kvm2
fakeroot dpkg-deb --build out/docker-machine-driver-kvm2_$(DEB_VERSION) $@
rm -rf out/docker-machine-driver-kvm2_$(DEB_VERSION)
out/docker-machine-driver-kvm2-$(RPM_VERSION).rpm: out/docker-machine-driver-kvm2-$(RPM_VERSION)-0.x86_64.deb
cp $< $@
out/docker-machine-driver-kvm2-$(RPM_VERSION)-0.%.rpm: out/docker-machine-driver-kvm2-%
cp -r installers/linux/rpm/kvm2_rpm_template out/docker-machine-driver-kvm2-$(RPM_VERSION)
sed -E -i 's/--VERSION--/'$(RPM_VERSION)'/g' out/docker-machine-driver-kvm2-$(RPM_VERSION)/docker-machine-driver-kvm2.spec
sed -E -i 's|--OUT--|'$(PWD)/out'|g' out/docker-machine-driver-kvm2-$(RPM_VERSION)/docker-machine-driver-kvm2.spec
rpmbuild -bb -D "_rpmdir $(PWD)/out" --target $* \
out/docker-machine-driver-kvm2-$(RPM_VERSION)/docker-machine-driver-kvm2.spec
@mv out/$*/docker-machine-driver-kvm2-$(RPM_VERSION)-0.$*.rpm out/ && rmdir out/$*
rm -rf out/docker-machine-driver-kvm2-$(RPM_VERSION)
.PHONY: kvm-image
kvm-image: installers/linux/kvm/Dockerfile ## Convenient alias to build the docker container
docker build --build-arg "GO_VERSION=$(KVM_GO_VERSION)" -t $(KVM_BUILD_IMAGE) -f $< $(dir $<)
@echo ""
@echo "$(@) successfully built"
kvm_in_docker:
docker image inspect -f '{{.Id}} {{.RepoTags}}' $(KVM_BUILD_IMAGE) || $(MAKE) kvm-image
rm -f out/docker-machine-driver-kvm2
$(call DOCKER,$(KVM_BUILD_IMAGE),/usr/bin/make out/docker-machine-driver-kvm2 COMMIT=$(COMMIT))
.PHONY: install-kvm-driver
install-kvm-driver: out/docker-machine-driver-kvm2 ## Install KVM Driver
mkdir -p $(GOBIN)
cp out/docker-machine-driver-kvm2 $(GOBIN)/docker-machine-driver-kvm2
.PHONY: release-kvm-driver
release-kvm-driver: install-kvm-driver checksum ## Release KVM Driver
gsutil cp $(GOBIN)/docker-machine-driver-kvm2 gs://minikube/drivers/kvm/$(VERSION)/
gsutil cp $(GOBIN)/docker-machine-driver-kvm2.sha256 gs://minikube/drivers/kvm/$(VERSION)/
site/themes/docsy/assets/vendor/bootstrap/package.js: ## update the website docsy theme git submodule
git submodule update -f --init --recursive
@ -843,8 +889,7 @@ site/themes/docsy/assets/vendor/bootstrap/package.js: ## update the website docs
out/hugo/hugo:
mkdir -p out
test -d out/hugo || git clone https://github.com/gohugoio/hugo.git out/hugo
go get golang.org/dl/go1.16 && go1.16 download
(cd out/hugo && go1.16 build --tags extended)
(cd out/hugo && go build --tags extended)
.PHONY: site
site: site/themes/docsy/assets/vendor/bootstrap/package.js out/hugo/hugo ## Serve the documentation site to localhost
@ -858,17 +903,14 @@ site: site/themes/docsy/assets/vendor/bootstrap/package.js out/hugo/hugo ## Serv
out/mkcmp:
GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o $@ cmd/performance/mkcmp/main.go
.PHONY: deploy/kicbase/auto-pause # auto pause binary to be used for kic image work around for not passing the whole repo as docker context
deploy/kicbase/auto-pause: $(SOURCE_GENERATED) $(SOURCE_FILES)
GOOS=linux GOARCH=$(GOARCH) go build -o $@ cmd/auto-pause/auto-pause.go
# auto pause binary to be used for ISO
deploy/iso/minikube-iso/board/coreos/minikube/rootfs-overlay/usr/bin/auto-pause: $(SOURCE_GENERATED) $(SOURCE_FILES)
deploy/iso/minikube-iso/board/coreos/minikube/rootfs-overlay/usr/bin/auto-pause: $(SOURCE_FILES) $(ASSET_FILES)
GOOS=linux GOARCH=$(GOARCH) go build -o $@ cmd/auto-pause/auto-pause.go
.PHONY: deploy/addons/auto-pause/auto-pause-hook
deploy/addons/auto-pause/auto-pause-hook: $(SOURCE_GENERATED) ## Build auto-pause hook addon
deploy/addons/auto-pause/auto-pause-hook: ## Build auto-pause hook addon
$(if $(quiet),@echo " GO $@")
$(Q)GOOS=linux CGO_ENABLED=0 go build -a --ldflags '-extldflags "-static"' -tags netgo -installsuffix netgo -o $@ cmd/auto-pause/auto-pause-hook/main.go cmd/auto-pause/auto-pause-hook/config.go cmd/auto-pause/auto-pause-hook/certs.go
@ -916,6 +958,12 @@ help:
@grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
.PHONY: update-golang-version
update-golang-version:
(cd hack/update/golang_version && \
go run update_golang_version.go)
.PHONY: update-kubernetes-version
update-kubernetes-version:
(cd hack/update/kubernetes_version && \
@ -949,6 +997,10 @@ cpu-benchmark-idle: ## run the cpu usage 5 minutes idle benchmark
cpu-benchmark-autopause: ## run the cpu usage auto-pause benchmark
./hack/benchmark/cpu_usage/auto_pause/benchmark_local_k8s.sh
.PHONY: time-to-k8s-benchmark
time-to-k8s-benchmark:
./hack/benchmark/time-to-k8s/time-to-k8s.sh
.PHONY: update-gopogh-version
update-gopogh-version: ## update gopogh version
(cd hack/update/gopogh_version && \

9
OWNERS
View File

@ -1,21 +1,19 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- tstromberg
- afbjorklund
- sharifelgamal
- RA489
- medyagh
- blueelvis
- prasadkatti
- ilya-zuyev
- prezha
- spowelljr
approvers:
- tstromberg
- afbjorklund
- sharifelgamal
- medyagh
- ilya-zuyev
- spowelljr
- prezha
emeritus_approvers:
- dlorenc
- luxas
@ -24,3 +22,4 @@ emeritus_approvers:
- aaron-prindle
- priyawadhwa
- josedonizetti
- tstromberg

View File

@ -25,7 +25,7 @@ minikube runs the latest stable release of Kubernetes, with support for standard
* [Persistent Volumes](https://minikube.sigs.k8s.io/docs/handbook/persistent_volumes/)
* [Ingress](https://kubernetes.io/docs/tasks/access-application-cluster/ingress-minikube/)
* [Dashboard](https://minikube.sigs.k8s.io/docs/handbook/dashboard/) - `minikube dashboard`
* [Container runtimes](https://minikube.sigs.k8s.io/docs/handbook/config/#runtime-configuration) - `start --container-runtime`
* [Container runtimes](https://minikube.sigs.k8s.io/docs/handbook/config/#runtime-configuration) - `minikube start --container-runtime`
* [Configure apiserver and kubelet options](https://minikube.sigs.k8s.io/docs/handbook/config/#modifying-kubernetes-defaults) via command-line flags
As well as developer-friendly features:

View File

@ -17,6 +17,7 @@ limitations under the License.
package main
import (
"flag"
"fmt"
"log"
"net/http"
@ -39,10 +40,11 @@ var mu sync.Mutex
var runtimePaused bool
var version = "0.0.1"
// TODO: #10597 make this configurable to support containerd/cri-o
var runtime = "docker"
var runtime = flag.String("container-runtime", "docker", "Container runtime to use for (un)pausing")
func main() {
flag.Parse()
// TODO: #10595 make this configurable
const interval = time.Minute * 1
@ -89,7 +91,7 @@ func runPause() {
r := command.NewExecRunner(true)
cr, err := cruntime.New(cruntime.Config{Type: runtime, Runner: r})
cr, err := cruntime.New(cruntime.Config{Type: *runtime, Runner: r})
if err != nil {
exit.Error(reason.InternalNewRuntime, "Failed runtime", err)
}
@ -111,7 +113,7 @@ func runUnpause() {
r := command.NewExecRunner(true)
cr, err := cruntime.New(cruntime.Config{Type: runtime, Runner: r})
cr, err := cruntime.New(cruntime.Config{Type: *runtime, Runner: r})
if err != nil {
exit.Error(reason.InternalNewRuntime, "Failed runtime", err)
}
@ -130,7 +132,7 @@ func alreadyPaused() {
defer mu.Unlock()
r := command.NewExecRunner(true)
cr, err := cruntime.New(cruntime.Config{Type: runtime, Runner: r})
cr, err := cruntime.New(cruntime.Config{Type: *runtime, Runner: r})
if err != nil {
exit.Error(reason.InternalNewRuntime, "Failed runtime", err)
}

View File

@ -52,7 +52,7 @@ var addCacheCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
out.WarningT("\"minikube cache\" will be deprecated in upcoming versions, please switch to \"minikube image load\"")
// Cache and load images into docker daemon
if err := machine.CacheAndLoadImages(args, cacheAddProfiles()); err != nil {
if err := machine.CacheAndLoadImages(args, cacheAddProfiles(), false); err != nil {
exit.Error(reason.InternalCacheLoad, "Failed to cache and load images", err)
}
// Add images to config file

View File

@ -77,25 +77,52 @@ var completionCmd = &cobra.Command{
}
if args[0] != "bash" && args[0] != "zsh" && args[0] != "fish" {
exit.Message(reason.Usage, "Sorry, completion support is not yet implemented for {{.name}}", out.V{"name": args[0]})
} else if args[0] == "bash" {
err := GenerateBashCompletion(os.Stdout, cmd.Parent())
if err != nil {
exit.Error(reason.InternalCompletion, "bash completion failed", err)
}
} else if args[0] == "zsh" {
err := GenerateZshCompletion(os.Stdout, cmd.Parent())
if err != nil {
exit.Error(reason.InternalCompletion, "zsh completion failed", err)
}
} else {
err := GenerateFishCompletion(os.Stdout, cmd.Parent())
if err != nil {
exit.Error(reason.InternalCompletion, "fish completion failed", err)
}
}
},
}
var bashCmd = &cobra.Command{
Use: "bash",
Short: "bash completion.",
Long: "Generate command completion for bash.",
Run: func(cmd *cobra.Command, args []string) {
err := GenerateBashCompletion(os.Stdout, cmd.Root())
if err != nil {
exit.Error(reason.InternalCompletion, "bash completion failed", err)
}
},
}
var zshCmd = &cobra.Command{
Use: "zsh",
Short: "zsh completion.",
Long: "Generate command completion for zsh.",
Run: func(cmd *cobra.Command, args []string) {
err := GenerateZshCompletion(os.Stdout, cmd.Root())
if err != nil {
exit.Error(reason.InternalCompletion, "zsh completion failed", err)
}
},
}
var fishCmd = &cobra.Command{
Use: "fish",
Short: "fish completion.",
Long: "Generate command completion for fish .",
Run: func(cmd *cobra.Command, args []string) {
err := GenerateFishCompletion(os.Stdout, cmd.Root())
if err != nil {
exit.Error(reason.InternalCompletion, "fish completion failed", err)
}
},
}
func init() {
completionCmd.AddCommand(bashCmd)
completionCmd.AddCommand(zshCmd)
completionCmd.AddCommand(fishCmd)
}
// GenerateBashCompletion generates the completion for the bash shell
func GenerateBashCompletion(w io.Writer, cmd *cobra.Command) error {
_, err := w.Write([]byte(boilerPlate))

View File

@ -98,7 +98,7 @@ var printAddonsList = func(cc *config.ClusterConfig) {
var tData [][]string
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Addon Name", "Profile", "Status"})
table.SetHeader([]string{"Addon Name", "Profile", "Status", "Maintainer"})
table.SetAutoFormatHeaders(true)
table.SetBorders(tablewriter.Border{Left: true, Top: true, Right: true, Bottom: true})
table.SetCenterSeparator("|")
@ -106,7 +106,11 @@ var printAddonsList = func(cc *config.ClusterConfig) {
for _, addonName := range addonNames {
addonBundle := assets.Addons[addonName]
enabled := addonBundle.IsEnabled(cc)
tData = append(tData, []string{addonName, cc.Name, fmt.Sprintf("%s %s", stringFromStatus(enabled), iconFromStatus(enabled))})
maintainer := addonBundle.Maintainer
if maintainer == "" {
maintainer = "unknown (third-party)"
}
tData = append(tData, []string{addonName, cc.Name, fmt.Sprintf("%s %s", stringFromStatus(enabled), iconFromStatus(enabled)), maintainer})
}
table.AppendBulk(tData)

View File

@ -76,7 +76,7 @@ var settings = []Setting{
{
name: "cpus",
set: SetInt,
validations: []setFn{IsPositive},
validations: []setFn{IsValidCPUs},
callbacks: []setFn{RequiresRestartMsg},
},
{
@ -122,22 +122,14 @@ var settings = []Setting{
name: config.ReminderWaitPeriodInHours,
set: SetInt,
},
{
name: config.WantReportError,
set: SetBool,
},
{
name: config.WantReportErrorPrompt,
set: SetBool,
},
{
name: config.WantKubectlDownloadMsg,
set: SetBool,
},
{
name: config.WantNoneDriverWarning,
set: SetBool,
},
{
name: config.WantVirtualBoxDriverWarning,
set: SetBool,
},
{
name: config.ProfileName,
set: SetString,
@ -146,14 +138,6 @@ var settings = []Setting{
name: Bootstrapper,
set: SetString,
},
{
name: config.ShowDriverDeprecationNotification,
set: SetBool,
},
{
name: config.ShowBootstrapperDeprecationNotification,
set: SetBool,
},
{
name: "insecure-registry",
set: SetString,
@ -172,7 +156,7 @@ var settings = []Setting{
setMap: SetMap,
},
{
name: "embed-certs",
name: config.EmbedCerts,
set: SetBool,
},
{

View File

@ -40,7 +40,7 @@ var addonsDisableCmd = &cobra.Command{
}
err := addons.SetAndSave(ClusterFlagValue(), addon, "false")
if err != nil {
exit.Error(reason.InternalDisable, "disable failed", err)
exit.Error(reason.InternalAddonDisable, "disable failed", err)
}
out.Step(style.AddonDisable, `"The '{{.minikube_addon}}' addon is disabled`, out.V{"minikube_addon": addon})
},

View File

@ -49,7 +49,7 @@ var addonsEnableCmd = &cobra.Command{
viper.Set(config.AddonRegistries, registries)
err := addons.SetAndSave(ClusterFlagValue(), addon, "true")
if err != nil {
exit.Error(reason.InternalEnable, "enable failed", err)
exit.Error(reason.InternalAddonEnable, "enable failed", err)
}
if addon == "dashboard" {
tipProfileArg := ""
@ -77,5 +77,6 @@ func init() {
addonsEnableCmd.Flags().StringVar(&images, "images", "", "Images used by this addon. Separated by commas.")
addonsEnableCmd.Flags().StringVar(&registries, "registries", "", "Registries used by this addon. Separated by commas.")
addonsEnableCmd.Flags().BoolVar(&addons.Force, "force", false, "If true, will perform potentially dangerous operations. Use with discretion.")
addonsEnableCmd.Flags().BoolVar(&addons.Refresh, "refresh", false, "If true, pods might get deleted and restarted on addon enable")
AddonsCmd.AddCommand(addonsEnableCmd)
}

View File

@ -25,6 +25,7 @@ import (
"strings"
units "github.com/docker/go-units"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/out"
@ -53,8 +54,19 @@ func IsValidDiskSize(name string, disksize string) error {
return nil
}
// IsValidCPUs checks whether cpus is an acceptable CPU-count setting: either
// the special constants.MaxResources sentinel, or (per IsPositive) a positive
// integer string. Returns nil when valid, otherwise IsPositive's error.
func IsValidCPUs(name string, cpus string) error {
	if cpus != constants.MaxResources {
		return IsPositive(name, cpus)
	}
	return nil
}
// IsValidMemory checks if a string is a valid memory size
func IsValidMemory(name string, memsize string) error {
if memsize == constants.MaxResources {
return nil
}
_, err := units.FromHumanSize(memsize)
if err != nil {
return fmt.Errorf("invalid memory size: %v", err)

View File

@ -20,13 +20,18 @@ import (
"github.com/pkg/errors"
"github.com/spf13/cobra"
"fmt"
"os"
pt "path"
"strings"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/node"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/reason"
)
@ -35,14 +40,16 @@ import (
var (
srcPath string
dstPath string
dstNode string
)
// cpCmd represents the cp command, similar to docker cp
var cpCmd = &cobra.Command{
Use: "cp <source file path> <target file absolute path>",
Use: "cp <source file path> <target node name>:<target file absolute path>",
Short: "Copy the specified file into minikube",
Long: "Copy the specified file into minikube, it will be saved at path <target file absolute path> in your minikube.\n" +
"Example Command : \"minikube cp a.txt /home/docker/b.txt\"\n",
"Example Command : \"minikube cp a.txt /home/docker/b.txt\"\n" +
" \"minikube cp a.txt minikube-m02:/home/docker/b.txt\"\n",
Run: func(cmd *cobra.Command, args []string) {
if len(args) != 2 {
exit.Message(reason.Usage, `Please specify the path to copy:
@ -51,18 +58,51 @@ var cpCmd = &cobra.Command{
srcPath = args[0]
dstPath = args[1]
// if destination path is not a absolute path, trying to parse with <node>:<abs path> format
if !strings.HasPrefix(dstPath, "/") {
if sp := strings.SplitN(dstPath, ":", 2); len(sp) == 2 {
dstNode = sp[0]
dstPath = sp[1]
}
}
validateArgs(srcPath, dstPath)
co := mustload.Running(ClusterFlagValue())
fa, err := assets.NewFileAsset(srcPath, pt.Dir(dstPath), pt.Base(dstPath), "0644")
if err != nil {
out.ErrLn("%v", errors.Wrap(err, "getting file asset"))
os.Exit(1)
}
defer func() {
if err := fa.Close(); err != nil {
klog.Warningf("error closing the file %s: %v", fa.GetSourcePath(), err)
}
}()
if err = co.CP.Runner.Copy(fa); err != nil {
out.ErrLn("%v", errors.Wrap(err, "copying file"))
os.Exit(1)
co := mustload.Running(ClusterFlagValue())
var runner command.Runner
if dstNode == "" {
runner = co.CP.Runner
} else {
n, _, err := node.Retrieve(*co.Config, dstNode)
if err != nil {
exit.Message(reason.GuestNodeRetrieve, "Node {{.nodeName}} does not exist.", out.V{"nodeName": dstNode})
}
h, err := machine.GetHost(co.API, *co.Config, *n)
if err != nil {
exit.Error(reason.GuestLoadHost, "Error getting host", err)
}
runner, err = machine.CommandRunner(h)
if err != nil {
exit.Error(reason.InternalCommandRunner, "Failed to get command runner", err)
}
}
if err = runner.Copy(fa); err != nil {
exit.Error(reason.InternalCommandRunner, fmt.Sprintf("Fail to copy file %s", fa.GetSourcePath()), err)
}
},
}

View File

@ -24,6 +24,7 @@ import (
"os/exec"
"os/user"
"regexp"
"strconv"
"time"
"github.com/pkg/errors"
@ -44,7 +45,8 @@ import (
)
var (
dashboardURLMode bool
dashboardURLMode bool
dashboardExposedPort int
// Matches: 127.0.0.1:8001
// TODO(tstromberg): Get kubectl to implement a stable supported output format.
hostPortRe = regexp.MustCompile(`127.0.0.1:\d{4,}`)
@ -65,6 +67,10 @@ var dashboardCmd = &cobra.Command{
}
}
if dashboardExposedPort < 0 || dashboardExposedPort > 65535 {
exit.Message(reason.HostKubectlProxy, "Invalid port")
}
kubectlVersion := co.Config.KubernetesConfig.KubernetesVersion
var err error
@ -92,7 +98,7 @@ var dashboardCmd = &cobra.Command{
}
out.ErrT(style.Launch, "Launching proxy ...")
p, hostPort, err := kubectlProxy(kubectlVersion, cname)
p, hostPort, err := kubectlProxy(kubectlVersion, cname, dashboardExposedPort)
if err != nil {
exit.Error(reason.HostKubectlProxy, "kubectl proxy", err)
}
@ -126,10 +132,10 @@ var dashboardCmd = &cobra.Command{
}
// kubectlProxy runs "kubectl proxy", returning host:port
func kubectlProxy(kubectlVersion string, contextName string) (*exec.Cmd, string, error) {
func kubectlProxy(kubectlVersion string, contextName string, port int) (*exec.Cmd, string, error) {
// port=0 picks a random system port
kubectlArgs := []string{"--context", contextName, "proxy", "--port=0"}
kubectlArgs := []string{"--context", contextName, "proxy", "--port", strconv.Itoa(port)}
var cmd *exec.Cmd
if kubectl, err := exec.LookPath("kubectl"); err == nil {
@ -172,8 +178,8 @@ func kubectlProxy(kubectlVersion string, contextName string) (*exec.Cmd, string,
// readByteWithTimeout returns a byte from a reader or an indicator that a timeout has occurred.
func readByteWithTimeout(r io.ByteReader, timeout time.Duration) (byte, bool, error) {
bc := make(chan byte)
ec := make(chan error)
bc := make(chan byte, 1)
ec := make(chan error, 1)
go func() {
b, err := r.ReadByte()
if err != nil {
@ -217,4 +223,5 @@ func checkURL(url string) error {
// init registers the dashboard command's flags: --url prints the dashboard URL
// instead of opening a browser, and --port pins the local kubectl-proxy port
// (0 picks a random free port).
func init() {
	dashboardCmd.Flags().BoolVar(&dashboardURLMode, "url", false, "Display dashboard URL instead of opening a browser")
	// Fixed user-visible help-text typo: "proxyfied" -> "proxied".
	dashboardCmd.Flags().IntVar(&dashboardExposedPort, "port", 0, "Exposed port of the proxied dashboard. Set to 0 to pick a random port.")
}

View File

@ -87,6 +87,24 @@ func (error DeletionError) Error() string {
return error.Err.Error()
}
// hostAndDirsDeleter tears down everything a profile left behind: any mount
// process, the libmachine hosts, the profile and machine directories, the
// stored config, and finally the kubeconfig context. Declared as a package
// variable so tests can substitute a mock implementation. The steps are
// order-dependent; only config/context removal failures are surfaced.
var hostAndDirsDeleter = func(api libmachine.API, cc *config.ClusterConfig, profileName string) error {
	// Best effort: a lingering mount process is reported but does not abort deletion.
	if err := killMountProcess(); err != nil {
		out.FailureT("Failed to kill mount process: {{.error}}", out.V{"error": err})
	}
	deleteHosts(api, cc)
	// In case DeleteHost didn't complete the job.
	deleteProfileDirectory(profileName)
	deleteMachineDirectories(cc)
	if err := deleteConfig(profileName); err != nil {
		return err
	}
	return deleteContext(profileName)
}
func init() {
deleteCmd.Flags().BoolVar(&deleteAll, "all", false, "Set flag to delete all profiles")
deleteCmd.Flags().BoolVar(&purge, "purge", false, "Set this flag to delete the '.minikube' folder from your user directory.")
@ -210,25 +228,30 @@ func DeleteProfiles(profiles []*config.Profile) []error {
klog.Infof("DeleteProfiles")
var errs []error
for _, profile := range profiles {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
err := deleteProfile(ctx, profile)
if err != nil {
mm, loadErr := machine.LoadMachine(profile.Name)
if !profile.IsValid() || (loadErr != nil || !mm.IsValid()) {
invalidProfileDeletionErrs := deleteInvalidProfile(profile)
if len(invalidProfileDeletionErrs) > 0 {
errs = append(errs, invalidProfileDeletionErrs...)
}
} else {
errs = append(errs, err)
}
}
errs = append(errs, deleteProfileTimeout(profile)...)
}
return errs
}
// deleteProfileTimeout deletes a single profile, bounding the whole operation
// to a five-minute timeout. When normal deletion fails, it re-checks validity:
// an invalid profile (or one whose machine cannot be loaded or is invalid) is
// routed through deleteInvalidProfile instead; otherwise the original error is
// returned. A nil return means deletion succeeded (or the invalid-profile
// cleanup produced no errors).
func deleteProfileTimeout(profile *config.Profile) []error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	if err := deleteProfile(ctx, profile); err != nil {
		// Distinguish "profile is corrupt" from a genuine deletion failure.
		mm, loadErr := machine.LoadMachine(profile.Name)
		if !profile.IsValid() || (loadErr != nil || !mm.IsValid()) {
			invalidProfileDeletionErrs := deleteInvalidProfile(profile)
			if len(invalidProfileDeletionErrs) > 0 {
				return invalidProfileDeletionErrs
			}
		} else {
			return []error{err}
		}
	}
	return nil
}
func deleteProfile(ctx context.Context, profile *config.Profile) error {
klog.Infof("Deleting %s", profile.Name)
register.Reg.SetStep(register.Deleting)
@ -239,6 +262,9 @@ func deleteProfile(ctx context.Context, profile *config.Profile) error {
// if driver is oci driver, delete containers and volumes
if driver.IsKIC(profile.Config.Driver) {
if err := unpauseIfNeeded(profile); err != nil {
klog.Warningf("failed to unpause %s : %v", profile.Name, err)
}
out.Step(style.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": profile.Name, "driver_name": profile.Config.Driver})
for _, n := range profile.Config.Nodes {
machineName := config.MachineName(*profile.Config, n)
@ -274,27 +300,57 @@ func deleteProfile(ctx context.Context, profile *config.Profile) error {
}
}
if err := killMountProcess(); err != nil {
out.FailureT("Failed to kill mount process: {{.error}}", out.V{"error": err})
}
deleteHosts(api, cc)
// In case DeleteHost didn't complete the job.
deleteProfileDirectory(profile.Name)
deleteMachineDirectories(cc)
if err := deleteConfig(profile.Name); err != nil {
if err := hostAndDirsDeleter(api, cc, profile.Name); err != nil {
return err
}
if err := deleteContext(profile.Name); err != nil {
return err
}
out.Step(style.Deleted, `Removed all traces of the "{{.name}}" cluster.`, out.V{"name": profile.Name})
return nil
}
// unpauseIfNeeded unpauses the profile's container runtime before deletion.
// It is a no-op for the docker runtime; for others it loads the host, builds
// a runtime handle, and unpauses only if the cluster is actually paused.
// Returns errors from API/host/pause checks; NOTE it exits the whole process
// (exit.Error) when a command runner or runtime cannot be constructed.
func unpauseIfNeeded(profile *config.Profile) error {
	// there is a known issue with removing kicbase container with paused containerd/crio containers inside
	// unpause it before we delete it
	crName := profile.Config.KubernetesConfig.ContainerRuntime
	if crName == "docker" {
		return nil
	}
	api, err := machine.NewAPIClient()
	if err != nil {
		return err
	}
	defer api.Close()
	host, err := machine.LoadHost(api, profile.Name)
	if err != nil {
		return err
	}
	r, err := machine.CommandRunner(host)
	if err != nil {
		exit.Error(reason.InternalCommandRunner, "Failed to get command runner", err)
	}
	cr, err := cruntime.New(cruntime.Config{Type: crName, Runner: r})
	if err != nil {
		exit.Error(reason.InternalNewRuntime, "Failed to create runtime", err)
	}
	paused, err := cluster.CheckIfPaused(cr, nil)
	if err != nil {
		return err
	}
	// Nothing to do when the runtime is already running normally.
	if !paused {
		return nil
	}
	klog.Infof("Unpause cluster %q", profile.Name)
	_, err = cluster.Unpause(cr, r, nil)
	return err
}
func deleteHosts(api libmachine.API, cc *config.ClusterConfig) {
register.Reg.SetStep(register.Deleting)

View File

@ -17,15 +17,18 @@ limitations under the License.
package cmd
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/docker/machine/libmachine"
"github.com/google/go-cmp/cmp"
"github.com/otiai10/copy"
"github.com/spf13/viper"
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/localpath"
)
@ -114,6 +117,7 @@ func TestDeleteProfile(t *testing.T) {
t.Logf("load failure: %v", err)
}
hostAndDirsDeleter = hostAndDirsDeleterMock
errs := DeleteProfiles([]*config.Profile{profile})
if len(errs) > 0 {
HandleDeletionErrors(errs)
@ -154,6 +158,17 @@ func TestDeleteProfile(t *testing.T) {
}
}
// hostAndDirsDeleterMock stands in for hostAndDirsDeleter in tests: it skips
// all host/directory teardown and only clears the profile context via
// deleteContextTest, so no real machines or directories are touched.
var hostAndDirsDeleterMock = func(api libmachine.API, cc *config.ClusterConfig, profileName string) error {
	return deleteContextTest()
}
// deleteContextTest unsets the active minikube profile setting; any failure
// is wrapped in a Fatal DeletionError.
func deleteContextTest() error {
	err := cmdcfg.Unset(config.ProfileName)
	if err == nil {
		return nil
	}
	return DeletionError{Err: fmt.Errorf("unset minikube profile: %v", err), Errtype: Fatal}
}
func TestDeleteAllProfiles(t *testing.T) {
td, err := ioutil.TempDir("", "all")
if err != nil {
@ -207,6 +222,7 @@ func TestDeleteAllProfiles(t *testing.T) {
}
profiles := append(validProfiles, inValidProfiles...)
hostAndDirsDeleter = hostAndDirsDeleterMock
errs := DeleteProfiles(profiles)
if errs != nil {

View File

@ -29,6 +29,7 @@ import (
var docsPath string
var testPath string
var codePath string
// generateDocs represents the generate-docs command
var generateDocs = &cobra.Command{
@ -45,16 +46,18 @@ var generateDocs = &cobra.Command{
}
// generate docs
if err := generate.Docs(RootCmd, docsPath, testPath); err != nil {
if err := generate.Docs(RootCmd, docsPath, testPath, codePath); err != nil {
exit.Error(reason.InternalGenerateDocs, "Unable to generate docs", err)
}
out.Step(style.Documentation, "Docs have been saved at - {{.path}}", out.V{"path": docsPath})
out.Step(style.Documentation, "Test docs have been saved at - {{.path}}", out.V{"path": testPath})
out.Step(style.Documentation, "Error code docs have been saved at - {{.path}}", out.V{"path": codePath})
},
}
func init() {
generateDocs.Flags().StringVar(&docsPath, "path", "", "The path on the file system where the docs in markdown need to be saved")
generateDocs.Flags().StringVar(&testPath, "test-path", "", "The path on the file system where the testing docs in markdown need to be saved")
generateDocs.Flags().StringVar(&codePath, "code-path", "", "The path on the file system where the error code docs in markdown need to be saved")
RootCmd.AddCommand(generateDocs)
}

View File

@ -17,37 +17,47 @@ limitations under the License.
package cmd
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/spf13/pflag"
"k8s.io/minikube/pkg/generate"
)
func TestGenerateDocs(t *testing.T) {
pflag.BoolP("help", "h", false, "") // avoid 'Docs are not updated. Please run `make generate-docs` to update commands documentation' error
dir := "../../../site/content/en/docs/commands/"
func TestGenerateTestDocs(t *testing.T) {
tempdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("creating temp dir failed: %v", err)
}
defer os.RemoveAll(tempdir)
docPath := filepath.Join(tempdir, "tests.md")
for _, sc := range RootCmd.Commands() {
t.Run(sc.Name(), func(t *testing.T) {
if sc.Hidden {
t.Skip()
}
fp := filepath.Join(dir, fmt.Sprintf("%s.md", sc.Name()))
expectedContents, err := ioutil.ReadFile(fp)
if err != nil {
t.Fatalf("Docs are not updated. Please run `make generate-docs` to update commands documentation: %v", err)
}
actualContents, err := generate.DocForCommand(sc)
if err != nil {
t.Fatalf("error getting contents: %v", err)
}
if diff := cmp.Diff(actualContents, string(expectedContents)); diff != "" {
t.Fatalf("Docs are not updated. Please run `make generate-docs` to update commands documentation: %s", diff)
}
})
err = generate.TestDocs(docPath, "../../../test/integration")
if err != nil {
t.Fatalf("error generating test docs: %v", err)
}
actualContents, err := ioutil.ReadFile(docPath)
if err != nil {
t.Fatalf("error reading generated file: %v", err)
}
rest := string(actualContents)
for rest != "" {
rest = checkForNeedsDoc(t, rest)
}
}
// checkForNeedsDoc reports (via t.Errorf) the first test whose generated doc
// entry still carries the "NEEDS DOC" placeholder, and returns the remainder
// of content after that placeholder so the caller can loop until it returns "".
// An empty return value means no placeholder was found.
func checkForNeedsDoc(t *testing.T, content string) string {
	needs := "\nNEEDS DOC\n"
	index := strings.Index(content, needs)
	if index < 0 {
		return ""
	}
	topHalf := content[:index]
	// The test name is the last line before the placeholder. Guard against a
	// placeholder at the very start of content: strings.LastIndex would return
	// -1 there and the original slice expression topHalf[-1:] would panic.
	testName := topHalf
	if nl := strings.LastIndex(topHalf, "\n"); nl >= 0 {
		testName = topHalf[nl:]
	}
	t.Errorf("%s is missing a doc string.", testName)
	return content[index+len(needs):]
}

View File

@ -45,6 +45,7 @@ var (
pull bool
imgDaemon bool
imgRemote bool
overwrite bool
tag string
push bool
dockerFile string
@ -130,13 +131,13 @@ var loadImageCmd = &cobra.Command{
if imgDaemon || imgRemote {
image.UseDaemon(imgDaemon)
image.UseRemote(imgRemote)
if err := machine.CacheAndLoadImages(args, []*config.Profile{profile}); err != nil {
if err := machine.CacheAndLoadImages(args, []*config.Profile{profile}, overwrite); err != nil {
exit.Error(reason.GuestImageLoad, "Failed to load image", err)
}
} else if local {
// Load images from local files, without doing any caching or checks in container runtime
// This is similar to tarball.Image but it is done by the container runtime in the cluster.
if err := machine.DoLoadImages(args, []*config.Profile{profile}, ""); err != nil {
if err := machine.DoLoadImages(args, []*config.Profile{profile}, "", overwrite); err != nil {
exit.Error(reason.GuestImageLoad, "Failed to load image", err)
}
}
@ -152,7 +153,7 @@ $ minikube image rm image busybox
$ minikube image unload image busybox
`,
Args: cobra.MinimumNArgs(1),
Aliases: []string{"unload"},
Aliases: []string{"remove", "unload"},
Run: func(cmd *cobra.Command, args []string) {
profile, err := config.LoadProfile(viper.GetString(config.ProfileName))
if err != nil {
@ -226,12 +227,12 @@ var buildImageCmd = &cobra.Command{
}
var listImageCmd = &cobra.Command{
Use: "list",
Use: "ls",
Short: "List images",
Example: `
$ minikube image list
$ minikube image ls
`,
Aliases: []string{"ls"},
Aliases: []string{"list"},
Run: func(cmd *cobra.Command, args []string) {
profile, err := config.LoadProfile(viper.GetString(config.ProfileName))
if err != nil {
@ -248,6 +249,7 @@ func init() {
loadImageCmd.Flags().BoolVarP(&pull, "pull", "", false, "Pull the remote image (no caching)")
loadImageCmd.Flags().BoolVar(&imgDaemon, "daemon", false, "Cache image from docker daemon")
loadImageCmd.Flags().BoolVar(&imgRemote, "remote", false, "Cache image from remote registry")
loadImageCmd.Flags().BoolVar(&overwrite, "overwrite", true, "Overwrite image even if same image:tag name exists")
imageCmd.AddCommand(loadImageCmd)
imageCmd.AddCommand(removeImageCmd)
buildImageCmd.Flags().StringVarP(&tag, "tag", "t", "", "Tag to apply to the new image (optional)")

View File

@ -27,6 +27,7 @@ import (
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/detect"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/node"
@ -80,6 +81,19 @@ host. Please be aware that when using --ssh all paths will apply to the remote m
return
}
supported := false
arch := detect.RuntimeArch()
for _, a := range constants.SupportedArchitectures {
if arch == a {
supported = true
break
}
}
if !supported {
fmt.Fprintf(os.Stderr, "Not supported on: %s\n", arch)
os.Exit(1)
}
if len(args) > 1 && args[0] != "--help" {
cluster := []string{"--cluster", cname}
args = append(cluster, args...)

View File

@ -50,6 +50,7 @@ const (
// placeholders for flag values
var (
mountIP string
mountPort uint16
mountVersion string
mountType string
isKill bool
@ -191,6 +192,9 @@ var mountCmd = &cobra.Command{
err = cluster.Mount(co.CP.Runner, ip.String(), vmPath, cfg)
if err != nil {
if rtErr, ok := err.(*cluster.MountError); ok && rtErr.ErrorType == cluster.MountErrorConnect {
exit.Error(reason.GuestMountCouldNotConnect, "mount could not connect", rtErr)
}
exit.Error(reason.GuestMount, "mount failed", err)
}
out.Step(style.Success, "Successfully mounted {{.sourcePath}} to {{.destinationPath}}", out.V{"sourcePath": hostPath, "destinationPath": vmPath})
@ -202,6 +206,7 @@ var mountCmd = &cobra.Command{
func init() {
mountCmd.Flags().StringVar(&mountIP, "ip", "", "Specify the ip that the mount should be setup on")
mountCmd.Flags().Uint16Var(&mountPort, "port", 0, "Specify the port that the mount should be setup on, where 0 means any free port.")
mountCmd.Flags().StringVar(&mountType, "type", nineP, "Specify the mount filesystem type (supported types: 9p)")
mountCmd.Flags().StringVar(&mountVersion, "9p-version", defaultMountVersion, "Specify the 9p version that the mount should use")
mountCmd.Flags().BoolVar(&isKill, "kill", false, "Kill the mount process spawned by minikube start")
@ -212,9 +217,9 @@ func init() {
mountCmd.Flags().IntVar(&mSize, "msize", defaultMsize, "The number of bytes to use for 9p packet payload")
}
// getPort asks the kernel for a free open port that is ready to use
// getPort uses the requested port or asks the kernel for a free open port that is ready to use
func getPort() (int, error) {
addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("localhost:%d", mountPort))
if err != nil {
panic(err)
}

View File

@ -104,7 +104,7 @@ func runPause(cmd *cobra.Command, args []string) {
}
func init() {
pauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", constants.DefaultNamespaces, "namespaces to pause")
pauseCmd.Flags().StringSliceVarP(&namespaces, "namespaces", "n", constants.DefaultNamespaces, "namespaces to pause")
pauseCmd.Flags().BoolVarP(&allNamespaces, "all-namespaces", "A", false, "If set, pause all namespaces")
pauseCmd.Flags().StringVarP(&outputFormat, "output", "o", "text", "Format to print stdout in. Options include: [text,json]")
}

View File

@ -25,9 +25,6 @@ import (
"strings"
"time"
"k8s.io/minikube/pkg/minikube/notify"
"k8s.io/minikube/pkg/version"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
@ -41,9 +38,11 @@ import (
"k8s.io/minikube/pkg/minikube/detect"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/notify"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/reason"
"k8s.io/minikube/pkg/minikube/translate"
"k8s.io/minikube/pkg/version"
)
var dirs = [...]string{
@ -91,16 +90,17 @@ func Execute() {
}
}
if !found {
exit.Message(reason.WrongBinaryWSL, "You are trying to run windows .exe binary inside WSL, for better integration please use Linux binary instead (Download at https://minikube.sigs.k8s.io/docs/start/.). Otherwise if you still want to do this, you can do it using --force")
exit.Message(reason.WrongBinaryWSL, "You are trying to run a windows .exe binary inside WSL. For better integration please use a Linux binary instead (Download at https://minikube.sigs.k8s.io/docs/start/.). Otherwise if you still want to do this, you can do it using --force")
}
}
if runtime.GOOS == "darwin" && detect.IsAmd64M1Emulation() {
exit.Message(reason.WrongBinaryM1, "You are trying to run amd64 binary on M1 system. Please use darwin/arm64 binary instead (Download at {{.url}}.)",
out.V{"url": notify.DownloadURL(version.GetVersion(), "darwin", "amd64")})
out.Infof("You are trying to run amd64 binary on M1 system. Please consider running darwin/arm64 binary instead (Download at {{.url}}.)",
out.V{"url": notify.DownloadURL(version.GetVersion(), "darwin", "arm64")})
}
_, callingCmd := filepath.Split(os.Args[0])
callingCmd = strings.TrimSuffix(callingCmd, ".exe")
if callingCmd == "kubectl" {
// If the user is using the minikube binary as kubectl, allow them to specify the kubectl context without also specifying minikube profile
@ -301,14 +301,11 @@ func setupViper() {
viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
viper.AutomaticEnv()
viper.RegisterAlias(config.EmbedCerts, embedCerts)
viper.SetDefault(config.WantUpdateNotification, true)
viper.SetDefault(config.ReminderWaitPeriodInHours, 24)
viper.SetDefault(config.WantReportError, false)
viper.SetDefault(config.WantReportErrorPrompt, true)
viper.SetDefault(config.WantKubectlDownloadMsg, true)
viper.SetDefault(config.WantNoneDriverWarning, true)
viper.SetDefault(config.ShowDriverDeprecationNotification, true)
viper.SetDefault(config.ShowBootstrapperDeprecationNotification, true)
viper.SetDefault(config.WantVirtualBoxDriverWarning, true)
}
func addToPath(dir string) {

View File

@ -28,10 +28,11 @@ import (
"os/user"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"github.com/blang/semver"
"github.com/blang/semver/v4"
"github.com/docker/machine/libmachine/ssh"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
@ -50,8 +51,10 @@ import (
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/detect"
"k8s.io/minikube/pkg/minikube/download"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/driver/auxdriver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/kubeconfig"
"k8s.io/minikube/pkg/minikube/localpath"
@ -120,7 +123,7 @@ func platform() string {
// This environment is exotic, let's output a bit more.
if vrole == "guest" || runtime.GOARCH != "amd64" {
if vsys != "" {
if vrole == "guest" && vsys != "" {
s.WriteString(fmt.Sprintf(" (%s/%s)", vsys, runtime.GOARCH))
} else {
s.WriteString(fmt.Sprintf(" (%s)", runtime.GOARCH))
@ -159,14 +162,13 @@ func runStart(cmd *cobra.Command, args []string) {
// can be configured as MINIKUBE_IMAGE_REPOSITORY and IMAGE_MIRROR_COUNTRY
// this should be updated to documentation
if len(registryMirror) == 0 {
registryMirror = viper.GetStringSlice("registry_mirror")
registryMirror = viper.GetStringSlice("registry-mirror")
}
if !config.ProfileNameValid(ClusterFlagValue()) {
out.WarningT("Profile name '{{.name}}' is not valid", out.V{"name": ClusterFlagValue()})
exit.Message(reason.Usage, "Only alphanumeric and dashes '-' are permitted. Minimum 2 characters, starting with alphanumeric.")
}
existing, err := config.Load(ClusterFlagValue())
if err != nil && !config.IsNotExist(err) {
kind := reason.HostConfigLoad
@ -177,7 +179,7 @@ func runStart(cmd *cobra.Command, args []string) {
}
if existing != nil {
upgradeExistingConfig(existing)
upgradeExistingConfig(cmd, existing)
} else {
validateProfileName()
}
@ -214,7 +216,7 @@ func runStart(cmd *cobra.Command, args []string) {
// Walk down the rest of the options
for _, alt := range alts {
// Skip non-default drivers
if !ds.Default {
if !alt.Default {
continue
}
out.WarningT("Startup with {{.old_driver}} driver failed, trying with alternate driver {{.new_driver}}: {{.error}}", out.V{"old_driver": ds.Name, "new_driver": alt.Name, "error": err})
@ -401,7 +403,7 @@ func updateDriver(driverName string) {
v, err := version.GetSemverVersion()
if err != nil {
out.WarningT("Error parsing minikube version: {{.error}}", out.V{"error": err})
} else if err := driver.InstallOrUpdate(driverName, localpath.MakeMiniPath("bin"), v, viper.GetBool(interactive), viper.GetBool(autoUpdate)); err != nil {
} else if err := auxdriver.InstallOrUpdate(driverName, localpath.MakeMiniPath("bin"), v, viper.GetBool(interactive), viper.GetBool(autoUpdate)); err != nil {
out.WarningT("Unable to update {{.driver}} driver: {{.error}}", out.V{"driver": driverName, "error": err})
}
}
@ -588,10 +590,39 @@ func selectDriver(existing *config.ClusterConfig) (registry.DriverState, []regis
pick, alts, rejects := driver.Suggest(choices)
if pick.Name == "" {
out.Step(style.ThumbsDown, "Unable to pick a default driver. Here is what was considered, in preference order:")
sort.Slice(rejects, func(i, j int) bool {
if rejects[i].Priority == rejects[j].Priority {
return rejects[i].Preference > rejects[j].Preference
}
return rejects[i].Priority > rejects[j].Priority
})
for _, r := range rejects {
if !r.Default {
continue
}
out.Infof("{{ .name }}: {{ .rejection }}", out.V{"name": r.Name, "rejection": r.Rejection})
if r.Suggestion != "" {
out.Infof("{{ .name }}: Suggestion: {{ .suggestion}}", out.V{"name": r.Name, "suggestion": r.Suggestion})
}
}
foundStoppedDocker := false
foundUnhealthy := false
for _, reject := range rejects {
if reject.Name == driver.Docker && reject.State.Installed && !reject.State.Running {
foundStoppedDocker = true
break
} else if reject.State.Installed && !reject.State.Healthy {
foundUnhealthy = true
break
}
}
if foundStoppedDocker {
exit.Message(reason.DrvDockerNotRunning, "Found docker, but the docker service isn't running. Try restarting the docker service.")
} else if foundUnhealthy {
exit.Message(reason.DrvNotHealthy, "Found driver(s) but none were healthy. See above for suggestions how to fix installed drivers.")
} else {
exit.Message(reason.DrvNotDetected, "No possible driver was detected. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/")
}
exit.Message(reason.DrvNotDetected, "No possible driver was detected. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/")
}
if len(alts) > 1 {
@ -715,9 +746,11 @@ func validateSpecifiedDriver(existing *config.ClusterConfig) {
// validateDriver validates that the selected driver appears sane, exits if not
func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
name := ds.Name
os := detect.RuntimeOS()
arch := detect.RuntimeArch()
klog.Infof("validating driver %q against %+v", name, existing)
if !driver.Supported(name) {
exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}", out.V{"driver": name, "os": runtime.GOOS, "arch": runtime.GOARCH})
exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}", out.V{"driver": name, "os": os, "arch": arch})
}
// if we are only downloading artifacts for a driver, we can stop validation here
@ -997,56 +1030,49 @@ func validateRequestedMemorySize(req int, drvName string) {
// validateCPUCount validates the cpu count matches the minimum recommended & not exceeding the available cpu count
func validateCPUCount(drvName string) {
var cpuCount int
if driver.BareMetal(drvName) {
var availableCPUs int
// Uses the gopsutil cpu package to count the number of logical cpu cores
cpuCount := getCPUCount(drvName)
isKIC := driver.IsKIC(drvName)
if isKIC {
si, err := oci.CachedDaemonInfo(drvName)
if err != nil {
si, err = oci.DaemonInfo(drvName)
if err != nil {
exit.Message(reason.Usage, "Ensure your {{.driver_name}} is running and is healthy.", out.V{"driver_name": driver.FullName(drvName)})
}
}
availableCPUs = si.CPUs
} else {
ci, err := cpu.Counts(true)
if err != nil {
klog.Warningf("Unable to get CPU info: %v", err)
} else {
cpuCount = ci
exit.Message(reason.Usage, "Unable to get CPU info: {{.err}}", out.V{"err": err})
}
} else {
cpuCount = viper.GetInt(cpus)
availableCPUs = ci
}
if cpuCount < minimumCPUS {
exitIfNotForced(reason.RsrcInsufficientCores, "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}", out.V{"requested_cpus": cpuCount, "minimum_cpus": minimumCPUS})
}
if !driver.IsKIC((drvName)) {
return
}
si, err := oci.CachedDaemonInfo(drvName)
if err != nil {
out.Styled(style.Confused, "Failed to verify '{{.driver_name}} info' will try again ...", out.V{"driver_name": drvName})
si, err = oci.DaemonInfo(drvName)
if err != nil {
exit.Message(reason.Usage, "Ensure your {{.driver_name}} is running and is healthy.", out.V{"driver_name": driver.FullName(drvName)})
}
}
if si.CPUs < cpuCount {
if availableCPUs < cpuCount {
if driver.IsDockerDesktop(drvName) {
out.Styled(style.Empty, `- Ensure your {{.driver_name}} daemon has access to enough CPU/memory resources.`, out.V{"driver_name": drvName})
if runtime.GOOS == "darwin" {
out.Styled(style.Empty, `- Docs https://docs.docker.com/docker-for-mac/#resources`, out.V{"driver_name": drvName})
out.Styled(style.Empty, `- Docs https://docs.docker.com/docker-for-mac/#resources`)
}
if runtime.GOOS == "windows" {
out.String("\n\t")
out.Styled(style.Empty, `- Docs https://docs.docker.com/docker-for-windows/#resources`, out.V{"driver_name": drvName})
out.Styled(style.Empty, `- Docs https://docs.docker.com/docker-for-windows/#resources`)
}
}
exitIfNotForced(reason.RsrcInsufficientCores, "Requested cpu count {{.requested_cpus}} is greater than the available cpus of {{.avail_cpus}}", out.V{"requested_cpus": cpuCount, "avail_cpus": si.CPUs})
exitIfNotForced(reason.RsrcInsufficientCores, "Requested cpu count {{.requested_cpus}} is greater than the available cpus of {{.avail_cpus}}", out.V{"requested_cpus": cpuCount, "avail_cpus": availableCPUs})
}
// looks good
if si.CPUs >= 2 {
if availableCPUs >= 2 {
return
}
@ -1114,6 +1140,8 @@ func validateFlags(cmd *cobra.Command, drvName string) {
if !validRuntime {
exit.Message(reason.Usage, `Invalid Container Runtime: "{{.runtime}}". Valid runtimes are: {{.validOptions}}`, out.V{"runtime": runtime, "validOptions": strings.Join(cruntime.ValidRuntimes(), ", ")})
}
validateCNI(cmd, runtime)
}
if driver.BareMetal(drvName) {
@ -1176,7 +1204,20 @@ func validateFlags(cmd *cobra.Command, drvName string) {
validateRegistryMirror()
validateInsecureRegistry()
}
// validateCNI ensures the user has not explicitly disabled CNI (--cni=false)
// while selecting a container runtime that requires one. The docker runtime
// is exempt since it manages networking without a CNI. When --force is set,
// the conflict is downgraded to a warning instead of a fatal exit.
func validateCNI(cmd *cobra.Command, runtime string) {
	if runtime == "docker" {
		return
	}
	cniDisabled := cmd.Flags().Changed(cniFlag) && strings.ToLower(viper.GetString(cniFlag)) == "false"
	if !cniDisabled {
		return
	}
	if viper.GetBool(force) {
		out.WarnReason(reason.Usage, "You have chosen to disable the CNI but the \"{{.name}}\" container runtime requires CNI", out.V{"name": runtime})
		return
	}
	exit.Message(reason.Usage, "The \"{{.name}}\" container runtime requires CNI", out.V{"name": runtime})
}
// validateChangedMemoryFlags validates memory related flags.
@ -1188,13 +1229,32 @@ func validateChangedMemoryFlags(drvName string) {
if !driver.HasResourceLimits(drvName) {
out.WarningT("The '{{.name}}' driver does not respect the --memory flag", out.V{"name": drvName})
}
req, err := util.CalculateSizeInMB(viper.GetString(memory))
if err != nil {
exitIfNotForced(reason.Usage, "Unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
var req int
var err error
memString := viper.GetString(memory)
if memString == constants.MaxResources {
sysLimit, containerLimit, err := memoryLimits(drvName)
if err != nil {
klog.Warningf("Unable to query memory limits: %+v", err)
}
req = noLimitMemory(sysLimit, containerLimit)
} else {
req, err = util.CalculateSizeInMB(memString)
if err != nil {
exitIfNotForced(reason.Usage, "Unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": memString, "error": err})
}
}
validateRequestedMemorySize(req, drvName)
}
// noLimitMemory returns the memory allocation (in MB) to use when the user
// requested the maximum available resources: the container daemon's limit
// when one is reported, otherwise the system limit minus a 1GB reserve for
// OS/VM overhead.
func noLimitMemory(sysLimit int, containerLimit int) int {
	if containerLimit == 0 {
		// No container limit applies; recommend 1GB headroom for OS/VM overhead.
		return sysLimit - 1024
	}
	return containerLimit
}
// This function validates if the --registry-mirror
// args match the format of http://localhost
func validateRegistryMirror() {
@ -1214,26 +1274,36 @@ func validateRegistryMirror() {
// This function validates if the --image-repository
// args match the format of registry.cn-hangzhou.aliyuncs.com/google_containers
func validateImageRepository(imagRepo string) (vaildImageRepo string) {
// also "<hostname>[:<port>]"
func validateImageRepository(imageRepo string) (validImageRepo string) {
if strings.ToLower(imagRepo) == "auto" {
vaildImageRepo = "auto"
if strings.ToLower(imageRepo) == "auto" {
validImageRepo = "auto"
}
URL, err := url.Parse(imagRepo)
URL, err := url.Parse(imageRepo)
if err != nil {
klog.Errorln("Error Parsing URL: ", err)
}
// tips when imagRepo ended with a trailing /.
if strings.HasSuffix(imagRepo, "/") {
out.Infof("The --image-repository flag your provided ended with a trailing / that could cause conflict in kuberentes, removed automatically")
}
// tips when imageRepo started with scheme.
if URL.Scheme != "" {
out.Infof("The --image-repository flag your provided contains Scheme: {{.scheme}}, it will be as a domian, removed automatically", out.V{"scheme": URL.Scheme})
var imageRepoPort string
if URL.Port() != "" && strings.Contains(imageRepo, ":"+URL.Port()) {
imageRepoPort = ":" + URL.Port()
}
vaildImageRepo = URL.Hostname() + strings.TrimSuffix(URL.Path, "/")
return
// tips when imageRepo ended with a trailing /.
if strings.HasSuffix(imageRepo, "/") {
out.Infof("The --image-repository flag your provided ended with a trailing / that could cause conflict in kuberentes, removed automatically")
}
// tips when imageRepo started with scheme such as http(s).
if URL.Scheme != "" {
out.Infof("The --image-repository flag your provided contains Scheme: {{.scheme}}, which will be removed automatically", out.V{"scheme": URL.Scheme})
}
validImageRepo = URL.Hostname() + imageRepoPort + strings.TrimSuffix(URL.Path, "/")
return validImageRepo
}
// This function validates if the --listen-address
@ -1462,5 +1532,8 @@ func exitGuestProvision(err error) {
if errors.Cause(err) == oci.ErrInsufficientDockerStorage {
exit.Message(reason.RsrcInsufficientDockerStorage, "preload extraction failed: \"No space left on device\"")
}
exit.Error(reason.GuestProvision, "error provisioning host", err)
if errors.Cause(err) == oci.ErrGetSSHPortContainerNotRunning {
exit.Message(reason.GuestProvisionContainerExited, "Docker container exited prematurely after it was created, consider investigating Docker's performance/health.")
}
exit.Error(reason.GuestProvision, "error provisioning guest", err)
}

View File

@ -21,12 +21,14 @@ import (
"strings"
"time"
"github.com/blang/semver"
"github.com/blang/semver/v4"
"github.com/pkg/errors"
"github.com/shirou/gopsutil/v3/cpu"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/drivers/kic"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
"k8s.io/minikube/pkg/minikube/cni"
@ -118,6 +120,7 @@ const (
defaultSSHUser = "root"
defaultSSHPort = 22
listenAddress = "listen-address"
extraDisks = "extra-disks"
)
var (
@ -135,8 +138,8 @@ func initMinikubeFlags() {
startCmd.Flags().Bool(interactive, true, "Allow user prompts for more information")
startCmd.Flags().Bool(dryRun, false, "dry-run mode. Validates configuration, but does not mutate system state")
startCmd.Flags().Int(cpus, 2, "Number of CPUs allocated to Kubernetes.")
startCmd.Flags().String(memory, "", "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>], where unit = b, k, m or g).")
startCmd.Flags().String(cpus, "2", fmt.Sprintf("Number of CPUs allocated to Kubernetes. Use %q to use the maximum number of CPUs.", constants.MaxResources))
startCmd.Flags().String(memory, "", fmt.Sprintf("Amount of RAM to allocate to Kubernetes (format: <number>[<unit>], where unit = b, k, m or g). Use %q to use the maximum amount of memory.", constants.MaxResources))
startCmd.Flags().String(humanReadableDiskSize, defaultDiskSize, "Disk size allocated to the minikube VM (format: <number>[<unit>], where unit = b, k, m or g).")
startCmd.Flags().Bool(downloadOnly, false, "If true, only download and cache files for later use - don't install or start anything.")
startCmd.Flags().Bool(cacheImages, true, "If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none.")
@ -147,7 +150,7 @@ func initMinikubeFlags() {
startCmd.Flags().String(containerRuntime, constants.DefaultContainerRuntime, fmt.Sprintf("The container runtime to be used (%s).", strings.Join(cruntime.ValidRuntimes(), ", ")))
startCmd.Flags().Bool(createMount, false, "This will start the mount daemon and automatically mount files into minikube.")
startCmd.Flags().String(mountString, constants.DefaultMountDir+":/minikube-host", "The argument to pass the minikube mount command on start.")
startCmd.Flags().StringSliceVar(&config.AddonList, "addons", nil, "Enable addons. see `minikube addons list` for a list of valid addon names.")
startCmd.Flags().StringSlice(config.AddonListFlag, nil, "Enable addons. see `minikube addons list` for a list of valid addon names.")
startCmd.Flags().String(criSocket, "", "The cri socket path to be used.")
startCmd.Flags().String(networkPlugin, "", "Kubelet network plug-in to use (default: auto)")
startCmd.Flags().Bool(enableDefaultCNI, false, "DEPRECATED: Replaced by --cni=bridge")
@ -160,10 +163,11 @@ func initMinikubeFlags() {
startCmd.Flags().IntP(nodes, "n", 1, "The number of nodes to spin up. Defaults to 1.")
startCmd.Flags().Bool(preload, true, "If set, download tarball of preloaded images if available to improve start time. Defaults to true.")
startCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
startCmd.Flags().Bool(forceSystemd, false, "If set, force the container runtime to use sytemd as cgroup manager. Defaults to false.")
startCmd.Flags().Bool(forceSystemd, false, "If set, force the container runtime to use systemd as cgroup manager. Defaults to false.")
startCmd.Flags().StringP(network, "", "", "network to run minikube with. Now it is used by docker/podman and KVM drivers. If left empty, minikube will create a new network.")
startCmd.Flags().StringVarP(&outputFormat, "output", "o", "text", "Format to print stdout in. Options include: [text,json]")
startCmd.Flags().StringP(trace, "", "", "Send trace events. Options include: [gcp]")
startCmd.Flags().Int(extraDisks, 0, "Number of extra disks created and attached to the minikube VM (currently only implemented for hyperkit driver)")
}
// initKubernetesFlags inits the commandline flags for Kubernetes related options
@ -290,10 +294,31 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
return createNode(cc, kubeNodeName, existing)
}
// generateNewConfigFromFlags generate a config.ClusterConfig based on flags
func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) config.ClusterConfig {
var cc config.ClusterConfig
// getCPUCount resolves the number of CPUs to allocate from the --cpus flag.
// When the flag is not the special "max" value, the flag's integer value is
// returned directly. For "max": KIC (container-based) drivers report the
// container daemon's CPU count, while all other drivers use the host's
// logical core count.
func getCPUCount(drvName string) int {
	requested := viper.GetString(cpus)
	if requested != constants.MaxResources {
		return viper.GetInt(cpus)
	}

	if driver.IsKIC(drvName) {
		si, err := oci.CachedDaemonInfo(drvName)
		if err != nil {
			// Cached daemon info may be stale or absent; retry with a live query.
			si, err = oci.DaemonInfo(drvName)
			if err != nil {
				exit.Message(reason.Usage, "Ensure your {{.driver_name}} is running and is healthy.", out.V{"driver_name": driver.FullName(drvName)})
			}
		}
		return si.CPUs
	}

	// Count the host's logical cores via gopsutil.
	ci, err := cpu.Counts(true)
	if err != nil {
		exit.Message(reason.Usage, "Unable to get CPU info: {{.err}}", out.V{"err": err})
	}
	return ci
}
func getMemorySize(cmd *cobra.Command, drvName string) int {
sysLimit, containerLimit, err := memoryLimits(drvName)
if err != nil {
klog.Warningf("Unable to query memory limits: %+v", err)
@ -301,10 +326,15 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName s
mem := suggestMemoryAllocation(sysLimit, containerLimit, viper.GetInt(nodes))
if cmd.Flags().Changed(memory) || viper.IsSet(memory) {
memString := viper.GetString(memory)
var err error
mem, err = pkgutil.CalculateSizeInMB(viper.GetString(memory))
if err != nil {
exit.Message(reason.Usage, "Generate unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
if memString == constants.MaxResources {
mem = noLimitMemory(sysLimit, containerLimit)
} else {
mem, err = pkgutil.CalculateSizeInMB(memString)
if err != nil {
exit.Message(reason.Usage, "Generate unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": memString, "error": err})
}
}
if driver.IsKIC(drvName) && mem > containerLimit {
exit.Message(reason.Usage, "{{.driver_name}} has only {{.container_limit}}MB memory but you specified {{.specified_memory}}MB", out.V{"container_limit": containerLimit, "specified_memory": mem, "driver_name": driver.FullName(drvName)})
@ -314,11 +344,19 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName s
klog.Infof("Using suggested %dMB memory alloc based on sys=%dMB, container=%dMB", mem, sysLimit, containerLimit)
}
return mem
}
func getDiskSize() int {
diskSize, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
if err != nil {
exit.Message(reason.Usage, "Generate unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err})
}
return diskSize
}
func getRepository(cmd *cobra.Command, k8sVersion string) string {
repository := viper.GetString(imageRepository)
mirrorCountry := strings.ToLower(viper.GetString(imageMirrorCountry))
if strings.ToLower(repository) == "auto" || (mirrorCountry != "" && repository == "") {
@ -342,12 +380,23 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName s
out.Styled(style.Success, "Using image repository {{.name}}", out.V{"name": repository})
}
return repository
}
// getCNIConfig resolves the CNI selection from flags. The deprecated
// --enable-default-cni flag is honored for backwards compatibility by
// mapping it to --cni=bridge, but only when --cni was not set explicitly.
func getCNIConfig(cmd *cobra.Command) string {
	if viper.GetBool(enableDefaultCNI) && !cmd.Flags().Changed(cniFlag) {
		klog.Errorf("Found deprecated --enable-default-cni flag, setting --cni=bridge")
		return "bridge"
	}
	return viper.GetString(cniFlag)
}
// generateNewConfigFromFlags generate a config.ClusterConfig based on flags
func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) config.ClusterConfig {
var cc config.ClusterConfig
// networkPlugin cni deprecation warning
chosenNetworkPlugin := viper.GetString(networkPlugin)
if chosenNetworkPlugin == "cni" {
@ -360,6 +409,8 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName s
checkNumaCount(k8sVersion)
checkExtraDiskOptions(cmd, drvName)
cc = config.ClusterConfig{
Name: ClusterFlagValue(),
KeepContext: viper.GetBool(keepContext),
@ -367,9 +418,9 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName s
MinikubeISO: viper.GetString(isoURL),
KicBaseImage: viper.GetString(kicBaseImage),
Network: viper.GetString(network),
Memory: mem,
CPUs: viper.GetInt(cpus),
DiskSize: diskSize,
Memory: getMemorySize(cmd, drvName),
CPUs: getCPUCount(drvName),
DiskSize: getDiskSize(),
Driver: drvName,
ListenAddress: viper.GetString(listenAddress),
HyperkitVpnKitSock: viper.GetString(vpnkitSock),
@ -402,6 +453,7 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName s
SSHUser: viper.GetString(sshSSHUser),
SSHKey: viper.GetString(sshSSHKey),
SSHPort: viper.GetInt(sshSSHPort),
ExtraDisks: viper.GetInt(extraDisks),
KubernetesConfig: config.KubernetesConfig{
KubernetesVersion: k8sVersion,
ClusterName: ClusterFlagValue(),
@ -415,10 +467,10 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName s
CRISocket: viper.GetString(criSocket),
NetworkPlugin: chosenNetworkPlugin,
ServiceCIDR: viper.GetString(serviceCIDR),
ImageRepository: repository,
ImageRepository: getRepository(cmd, k8sVersion),
ExtraOptions: config.ExtraOptions,
ShouldLoadCachedImages: viper.GetBool(cacheImages),
CNI: chosenCNI,
CNI: getCNIConfig(cmd),
NodePort: viper.GetInt(apiServerPort),
},
MultiNodeRequested: viper.GetInt(nodes) > 1,
@ -448,7 +500,7 @@ func checkNumaCount(k8sVersion string) {
}
// upgradeExistingConfig upgrades legacy configuration files
func upgradeExistingConfig(cc *config.ClusterConfig) {
func upgradeExistingConfig(cmd *cobra.Command, cc *config.ClusterConfig) {
if cc == nil {
return
}
@ -468,6 +520,26 @@ func upgradeExistingConfig(cc *config.ClusterConfig) {
cc.KicBaseImage = viper.GetString(kicBaseImage)
klog.Infof("config upgrade: KicBaseImage=%s", cc.KicBaseImage)
}
if cc.CPUs == 0 {
klog.Info("Existing config file was missing cpu. (could be an old minikube config), will use the default value")
cc.CPUs = viper.GetInt(cpus)
}
if cc.Memory == 0 {
klog.Info("Existing config file was missing memory. (could be an old minikube config), will use the default value")
memInMB := getMemorySize(cmd, cc.Driver)
cc.Memory = memInMB
}
// pre minikube 1.9.2 cc.KubernetesConfig.NodePort was not populated.
// in minikube config there were two fields for api server port.
// one in cc.KubernetesConfig.NodePort and one in cc.Nodes.Port
// this makes sure api server port not be set as 0!
if cc.KubernetesConfig.NodePort == 0 {
cc.KubernetesConfig.NodePort = viper.GetInt(apiServerPort)
}
}
// updateExistingConfigFromFlags will update the existing config from the flags - used on a second start
@ -478,222 +550,79 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC
cc := *existing
if cmd.Flags().Changed(containerRuntime) {
cc.KubernetesConfig.ContainerRuntime = viper.GetString(containerRuntime)
if cmd.Flags().Changed(memory) && getMemorySize(cmd, cc.Driver) != cc.Memory {
out.WarningT("You cannot change the memory size for an existing minikube cluster. Please first delete the cluster.")
}
if cmd.Flags().Changed(keepContext) {
cc.KeepContext = viper.GetBool(keepContext)
}
if cmd.Flags().Changed(embedCerts) {
cc.EmbedCerts = viper.GetBool(embedCerts)
}
if cmd.Flags().Changed(isoURL) {
cc.MinikubeISO = viper.GetString(isoURL)
}
if cc.Memory == 0 {
klog.Info("Existing config file was missing memory. (could be an old minikube config), will use the default value")
memInMB, err := pkgutil.CalculateSizeInMB(viper.GetString(memory))
if err != nil {
klog.Warningf("error calculate memory size in mb : %v", err)
}
cc.Memory = memInMB
}
if cmd.Flags().Changed(memory) {
memInMB, err := pkgutil.CalculateSizeInMB(viper.GetString(memory))
if err != nil {
klog.Warningf("error calculate memory size in mb : %v", err)
}
if memInMB != cc.Memory {
out.WarningT("You cannot change the memory size for an existing minikube cluster. Please first delete the cluster.")
}
if cmd.Flags().Changed(cpus) && viper.GetInt(cpus) != cc.CPUs {
out.WarningT("You cannot change the CPUs for an existing minikube cluster. Please first delete the cluster.")
}
// validate the memory size in case user changed their system memory limits (example change docker desktop or upgraded memory.)
validateRequestedMemorySize(cc.Memory, cc.Driver)
if cc.CPUs == 0 {
klog.Info("Existing config file was missing cpu. (could be an old minikube config), will use the default value")
cc.CPUs = viper.GetInt(cpus)
}
if cmd.Flags().Changed(cpus) {
if viper.GetInt(cpus) != cc.CPUs {
out.WarningT("You cannot change the CPUs for an existing minikube cluster. Please first delete the cluster.")
}
if cmd.Flags().Changed(humanReadableDiskSize) && getDiskSize() != existing.DiskSize {
out.WarningT("You cannot change the disk size for an existing minikube cluster. Please first delete the cluster.")
}
if cmd.Flags().Changed(humanReadableDiskSize) {
memInMB, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
if err != nil {
klog.Warningf("error calculate disk size in mb : %v", err)
}
if memInMB != existing.DiskSize {
out.WarningT("You cannot change the Disk size for an exiting minikube cluster. Please first delete the cluster.")
}
checkExtraDiskOptions(cmd, cc.Driver)
if cmd.Flags().Changed(extraDisks) && viper.GetInt(extraDisks) != existing.ExtraDisks {
out.WarningT("You cannot add or remove extra disks for an existing minikube cluster. Please first delete the cluster.")
}
if cmd.Flags().Changed(vpnkitSock) {
cc.HyperkitVpnKitSock = viper.GetString(vpnkitSock)
}
if cmd.Flags().Changed(vsockPorts) {
cc.HyperkitVSockPorts = viper.GetStringSlice(vsockPorts)
}
if cmd.Flags().Changed(nfsShare) {
cc.NFSShare = viper.GetStringSlice(nfsShare)
}
if cmd.Flags().Changed(nfsSharesRoot) {
cc.NFSSharesRoot = viper.GetString(nfsSharesRoot)
}
if cmd.Flags().Changed(hostOnlyCIDR) {
cc.HostOnlyCIDR = viper.GetString(hostOnlyCIDR)
}
if cmd.Flags().Changed(hypervVirtualSwitch) {
cc.HypervVirtualSwitch = viper.GetString(hypervVirtualSwitch)
}
if cmd.Flags().Changed(hypervUseExternalSwitch) {
cc.HypervUseExternalSwitch = viper.GetBool(hypervUseExternalSwitch)
}
if cmd.Flags().Changed(hypervExternalAdapter) {
cc.HypervExternalAdapter = viper.GetString(hypervExternalAdapter)
}
if cmd.Flags().Changed(kvmNetwork) {
cc.KVMNetwork = viper.GetString(kvmNetwork)
}
if cmd.Flags().Changed(kvmQemuURI) {
cc.KVMQemuURI = viper.GetString(kvmQemuURI)
}
if cmd.Flags().Changed(kvmGPU) {
cc.KVMGPU = viper.GetBool(kvmGPU)
}
if cmd.Flags().Changed(kvmHidden) {
cc.KVMHidden = viper.GetBool(kvmHidden)
}
if cmd.Flags().Changed(kvmNUMACount) {
cc.KVMNUMACount = viper.GetInt(kvmNUMACount)
}
if cmd.Flags().Changed(disableDriverMounts) {
cc.DisableDriverMounts = viper.GetBool(disableDriverMounts)
}
if cmd.Flags().Changed(uuid) {
cc.UUID = viper.GetString(uuid)
}
if cmd.Flags().Changed(noVTXCheck) {
cc.NoVTXCheck = viper.GetBool(noVTXCheck)
}
if cmd.Flags().Changed(dnsProxy) {
cc.DNSProxy = viper.GetBool(dnsProxy)
}
if cmd.Flags().Changed(hostDNSResolver) {
cc.HostDNSResolver = viper.GetBool(hostDNSResolver)
}
if cmd.Flags().Changed(hostOnlyNicType) {
cc.HostOnlyNicType = viper.GetString(hostOnlyNicType)
}
if cmd.Flags().Changed(natNicType) {
cc.NatNicType = viper.GetString(natNicType)
}
updateStringFromFlag(cmd, &cc.MinikubeISO, isoURL)
updateBoolFromFlag(cmd, &cc.KeepContext, keepContext)
updateBoolFromFlag(cmd, &cc.EmbedCerts, embedCerts)
updateStringFromFlag(cmd, &cc.MinikubeISO, isoURL)
updateStringFromFlag(cmd, &cc.KicBaseImage, kicBaseImage)
updateStringFromFlag(cmd, &cc.Network, network)
updateStringFromFlag(cmd, &cc.HyperkitVpnKitSock, vpnkitSock)
updateStringSliceFromFlag(cmd, &cc.HyperkitVSockPorts, vsockPorts)
updateStringSliceFromFlag(cmd, &cc.NFSShare, nfsShare)
updateStringFromFlag(cmd, &cc.NFSSharesRoot, nfsSharesRoot)
updateStringFromFlag(cmd, &cc.HostOnlyCIDR, hostOnlyCIDR)
updateStringFromFlag(cmd, &cc.HypervVirtualSwitch, hypervVirtualSwitch)
updateBoolFromFlag(cmd, &cc.HypervUseExternalSwitch, hypervUseExternalSwitch)
updateStringFromFlag(cmd, &cc.HypervExternalAdapter, hypervExternalAdapter)
updateStringFromFlag(cmd, &cc.KVMNetwork, kvmNetwork)
updateStringFromFlag(cmd, &cc.KVMQemuURI, kvmQemuURI)
updateBoolFromFlag(cmd, &cc.KVMGPU, kvmGPU)
updateBoolFromFlag(cmd, &cc.KVMHidden, kvmHidden)
updateBoolFromFlag(cmd, &cc.DisableDriverMounts, disableDriverMounts)
updateStringFromFlag(cmd, &cc.UUID, uuid)
updateBoolFromFlag(cmd, &cc.NoVTXCheck, noVTXCheck)
updateBoolFromFlag(cmd, &cc.DNSProxy, dnsProxy)
updateBoolFromFlag(cmd, &cc.HostDNSResolver, hostDNSResolver)
updateStringFromFlag(cmd, &cc.HostOnlyNicType, hostOnlyNicType)
updateStringFromFlag(cmd, &cc.NatNicType, natNicType)
updateDurationFromFlag(cmd, &cc.StartHostTimeout, waitTimeout)
updateStringSliceFromFlag(cmd, &cc.ExposedPorts, ports)
updateStringFromFlag(cmd, &cc.SSHIPAddress, sshIPAddress)
updateStringFromFlag(cmd, &cc.SSHUser, sshSSHUser)
updateStringFromFlag(cmd, &cc.SSHKey, sshSSHKey)
updateIntFromFlag(cmd, &cc.SSHPort, sshSSHPort)
updateStringFromFlag(cmd, &cc.KubernetesConfig.Namespace, startNamespace)
updateStringFromFlag(cmd, &cc.KubernetesConfig.APIServerName, apiServerName)
updateStringSliceFromFlag(cmd, &cc.KubernetesConfig.APIServerNames, "apiserver-names")
updateStringFromFlag(cmd, &cc.KubernetesConfig.DNSDomain, dnsDomain)
updateStringFromFlag(cmd, &cc.KubernetesConfig.FeatureGates, featureGates)
updateStringFromFlag(cmd, &cc.KubernetesConfig.ContainerRuntime, containerRuntime)
updateStringFromFlag(cmd, &cc.KubernetesConfig.CRISocket, criSocket)
updateStringFromFlag(cmd, &cc.KubernetesConfig.NetworkPlugin, networkPlugin)
updateStringFromFlag(cmd, &cc.KubernetesConfig.ServiceCIDR, serviceCIDR)
updateBoolFromFlag(cmd, &cc.KubernetesConfig.ShouldLoadCachedImages, cacheImages)
updateIntFromFlag(cmd, &cc.KubernetesConfig.NodePort, apiServerPort)
if cmd.Flags().Changed(kubernetesVersion) {
cc.KubernetesConfig.KubernetesVersion = getKubernetesVersion(existing)
}
if cmd.Flags().Changed(startNamespace) {
cc.KubernetesConfig.Namespace = viper.GetString(startNamespace)
}
if cmd.Flags().Changed(apiServerName) {
cc.KubernetesConfig.APIServerName = viper.GetString(apiServerName)
}
if cmd.Flags().Changed("apiserver-names") {
cc.KubernetesConfig.APIServerNames = viper.GetStringSlice("apiserver-names")
}
if cmd.Flags().Changed(apiServerPort) {
cc.KubernetesConfig.NodePort = viper.GetInt(apiServerPort)
}
if cmd.Flags().Changed(vsockPorts) {
cc.ExposedPorts = viper.GetStringSlice(ports)
}
// pre minikube 1.9.2 cc.KubernetesConfig.NodePort was not populated.
// in minikube config there were two fields for api server port.
// one in cc.KubernetesConfig.NodePort and one in cc.Nodes.Port
// this makes sure api server port not be set as 0!
if existing.KubernetesConfig.NodePort == 0 {
cc.KubernetesConfig.NodePort = viper.GetInt(apiServerPort)
}
if cmd.Flags().Changed(dnsDomain) {
cc.KubernetesConfig.DNSDomain = viper.GetString(dnsDomain)
}
if cmd.Flags().Changed(featureGates) {
cc.KubernetesConfig.FeatureGates = viper.GetString(featureGates)
}
if cmd.Flags().Changed(containerRuntime) {
cc.KubernetesConfig.ContainerRuntime = viper.GetString(containerRuntime)
}
if cmd.Flags().Changed(criSocket) {
cc.KubernetesConfig.CRISocket = viper.GetString(criSocket)
}
if cmd.Flags().Changed(networkPlugin) {
cc.KubernetesConfig.NetworkPlugin = viper.GetString(networkPlugin)
}
if cmd.Flags().Changed(serviceCIDR) {
cc.KubernetesConfig.ServiceCIDR = viper.GetString(serviceCIDR)
}
if cmd.Flags().Changed(cacheImages) {
cc.KubernetesConfig.ShouldLoadCachedImages = viper.GetBool(cacheImages)
}
if cmd.Flags().Changed(imageRepository) {
cc.KubernetesConfig.ImageRepository = viper.GetString(imageRepository)
}
if cmd.Flags().Changed("extra-config") {
cc.KubernetesConfig.ExtraOptions = config.ExtraOptions
}
if cmd.Flags().Changed(enableDefaultCNI) && !cmd.Flags().Changed(cniFlag) {
if viper.GetBool(enableDefaultCNI) {
klog.Errorf("Found deprecated --enable-default-cni flag, setting --cni=bridge")
cc.KubernetesConfig.CNI = "bridge"
}
}
if cmd.Flags().Changed(cniFlag) {
cc.KubernetesConfig.CNI = viper.GetString(cniFlag)
if cmd.Flags().Changed(cniFlag) || cmd.Flags().Changed(enableDefaultCNI) {
cc.KubernetesConfig.CNI = getCNIConfig(cmd)
}
if cmd.Flags().Changed(waitComponents) {
@ -708,6 +637,41 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC
return cc
}
// updateStringFromFlag overwrites *v with the flag's string value, but only
// when the flag was explicitly set on the command line — preserving existing
// cluster-config values across restarts otherwise.
func updateStringFromFlag(cmd *cobra.Command, v *string, key string) {
	if !cmd.Flags().Changed(key) {
		return
	}
	*v = viper.GetString(key)
}
// updateBoolFromFlag overwrites *v with the flag's boolean value, but only
// when the flag was explicitly set on the command line — preserving existing
// cluster-config values across restarts otherwise.
func updateBoolFromFlag(cmd *cobra.Command, v *bool, key string) {
	if !cmd.Flags().Changed(key) {
		return
	}
	*v = viper.GetBool(key)
}
// updateStringSliceFromFlag overwrites *v with the flag's []string value, but
// only when the flag was explicitly set on the command line — preserving
// existing cluster-config values across restarts otherwise.
func updateStringSliceFromFlag(cmd *cobra.Command, v *[]string, key string) {
	if !cmd.Flags().Changed(key) {
		return
	}
	*v = viper.GetStringSlice(key)
}
// updateIntFromFlag overwrites *v with the flag's integer value, but only
// when the flag was explicitly set on the command line — preserving existing
// cluster-config values across restarts otherwise.
func updateIntFromFlag(cmd *cobra.Command, v *int, key string) {
	if !cmd.Flags().Changed(key) {
		return
	}
	*v = viper.GetInt(key)
}
// updateDurationFromFlag overwrites *v with the flag's duration value, but
// only when the flag was explicitly set on the command line — preserving
// existing cluster-config values across restarts otherwise.
func updateDurationFromFlag(cmd *cobra.Command, v *time.Duration, key string) {
	if !cmd.Flags().Changed(key) {
		return
	}
	*v = viper.GetDuration(key)
}
// interpretWaitFlag interprets the wait flag and respects the legacy minikube users
// returns map of components to wait for
func interpretWaitFlag(cmd cobra.Command) map[string]bool {
@ -752,3 +716,20 @@ func interpretWaitFlag(cmd cobra.Command) map[string]bool {
klog.Infof("Waiting for components: %+v", waitComponents)
return waitComponents
}
// checkExtraDiskOptions warns when --extra-disks was explicitly set for a
// driver that does not support creating extra disks. It only warns (does not
// exit) so unsupported combinations remain usable, just without extra disks.
//
// Fix: the original loop variable was named "driver", shadowing the imported
// driver package that is referenced in the same function (driver.HyperKit).
func checkExtraDiskOptions(cmd *cobra.Command, driverName string) {
	supportedDrivers := []string{driver.HyperKit}

	if !cmd.Flags().Changed(extraDisks) {
		return
	}
	for _, supportedDriver := range supportedDrivers {
		if driverName == supportedDriver {
			return
		}
	}
	out.WarningT("Specifying extra disks is currently only supported for the following drivers: {{.supported_drivers}}. If you can contribute to add this feature, please create a PR.", out.V{"supported_drivers": supportedDrivers})
}

View File

@ -21,7 +21,7 @@ import (
"strings"
"testing"
"github.com/blang/semver"
"github.com/blang/semver/v4"
"github.com/spf13/cobra"
"github.com/spf13/viper"
@ -320,40 +320,44 @@ func TestBaseImageFlagDriverCombo(t *testing.T) {
func TestValidateImageRepository(t *testing.T) {
var tests = []struct {
imageRepository string
vaildImageRepository string
validImageRepository string
}{
{
imageRepository: "auto",
vaildImageRepository: "auto",
validImageRepository: "auto",
},
{
imageRepository: "http://registry.test.com/google_containers/",
vaildImageRepository: "registry.test.com/google_containers",
validImageRepository: "registry.test.com/google_containers",
},
{
imageRepository: "https://registry.test.com/google_containers/",
vaildImageRepository: "registry.test.com/google_containers",
validImageRepository: "registry.test.com/google_containers",
},
{
imageRepository: "registry.test.com/google_containers/",
vaildImageRepository: "registry.test.com/google_containers",
validImageRepository: "registry.test.com/google_containers",
},
{
imageRepository: "http://registry.test.com/google_containers",
vaildImageRepository: "registry.test.com/google_containers",
validImageRepository: "registry.test.com/google_containers",
},
{
imageRepository: "https://registry.test.com/google_containers",
vaildImageRepository: "registry.test.com/google_containers",
validImageRepository: "registry.test.com/google_containers",
},
{
imageRepository: "https://registry.test.com:6666/google_containers",
validImageRepository: "registry.test.com:6666/google_containers",
},
}
for _, test := range tests {
t.Run(test.imageRepository, func(t *testing.T) {
vaildImageRepository := validateImageRepository(test.imageRepository)
if vaildImageRepository != test.vaildImageRepository {
validImageRepository := validateImageRepository(test.imageRepository)
if validImageRepository != test.validImageRepository {
t.Errorf("validateImageRepository(imageRepo=%v): got %v, expected %v",
test.imageRepository, vaildImageRepository, test.vaildImageRepository)
test.imageRepository, validImageRepository, test.validImageRepository)
}
})
}

View File

@ -64,7 +64,7 @@ func init() {
stopCmd.Flags().StringVarP(&outputFormat, "output", "o", "text", "Format to print stdout in. Options include: [text,json]")
if err := viper.GetViper().BindPFlags(stopCmd.Flags()); err != nil {
exit.Error(reason.InternalFlagsBind, "unable to bind flags", err)
exit.Error(reason.InternalBindFlags, "unable to bind flags", err)
}
}

View File

@ -39,8 +39,9 @@ import (
// unpauseCmd represents the docker-pause command
var unpauseCmd = &cobra.Command{
Use: "unpause",
Short: "unpause Kubernetes",
Use: "unpause",
Aliases: []string{"resume"},
Short: "unpause Kubernetes",
Run: func(cmd *cobra.Command, args []string) {
cname := ClusterFlagValue()
register.SetEventLogPath(localpath.EventLog(cname))
@ -105,7 +106,7 @@ var unpauseCmd = &cobra.Command{
}
func init() {
unpauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", constants.DefaultNamespaces, "namespaces to unpause")
unpauseCmd.Flags().StringSliceVarP(&namespaces, "namespaces", "n", constants.DefaultNamespaces, "namespaces to unpause")
unpauseCmd.Flags().BoolVarP(&allNamespaces, "all-namespaces", "A", false, "If set, unpause all namespaces")
unpauseCmd.Flags().StringVarP(&outputFormat, "output", "o", "text", "Format to print stdout in. Options include: [text,json]")
}

View File

@ -18,18 +18,23 @@ package cmd
import (
"encoding/json"
"os/exec"
"strings"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/reason"
"k8s.io/minikube/pkg/version"
)
var (
versionOutput string
shortVersion bool
versionOutput string
shortVersion bool
listComponentsVersions bool
)
var versionCmd = &cobra.Command{
@ -43,6 +48,33 @@ var versionCmd = &cobra.Command{
"minikubeVersion": minikubeVersion,
"commit": gitCommitID,
}
if listComponentsVersions && !shortVersion {
co := mustload.Running(ClusterFlagValue())
runner := co.CP.Runner
versionCMDS := map[string]*exec.Cmd{
"docker": exec.Command("docker", "version", "--format={{.Client.Version}}"),
"containerd": exec.Command("containerd", "--version"),
"crio": exec.Command("crio", "version"),
"podman": exec.Command("sudo", "podman", "version"),
"crictl": exec.Command("sudo", "crictl", "version"),
"buildctl": exec.Command("buildctl", "--version"),
"ctr": exec.Command("sudo", "ctr", "version"),
"runc": exec.Command("runc", "--version"),
}
for k, v := range versionCMDS {
rr, err := runner.RunCmd(v)
if err != nil {
klog.Warningf("error getting %s's version: %v", k, err)
data[k] = "error"
} else {
data[k] = strings.TrimSpace(rr.Stdout.String())
}
}
}
switch versionOutput {
case "":
if !shortVersion {
@ -50,6 +82,15 @@ var versionCmd = &cobra.Command{
if gitCommitID != "" {
out.Ln("commit: %v", gitCommitID)
}
for k, v := range data {
// for backward compatibility we keep displaying the old way for these two
if k == "minikubeVersion" || k == "commit" {
continue
}
if v != "" {
out.Ln("\n%s:\n%s", k, v)
}
}
} else {
out.Ln("%v", minikubeVersion)
}
@ -74,4 +115,5 @@ var versionCmd = &cobra.Command{
func init() {
versionCmd.Flags().StringVarP(&versionOutput, "output", "o", "", "One of 'yaml' or 'json'.")
versionCmd.Flags().BoolVar(&shortVersion, "short", false, "Print just the version number.")
versionCmd.Flags().BoolVar(&listComponentsVersions, "components", false, "list versions of all components included with minikube. (the cluster must be running)")
}

View File

@ -28,6 +28,7 @@ import (
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/spf13/pflag"
"k8s.io/klog/v2"
@ -67,6 +68,7 @@ func main() {
// Don't parse flags when running as kubectl
_, callingCmd := filepath.Split(os.Args[0])
callingCmd = strings.TrimSuffix(callingCmd, ".exe")
parse := callingCmd != "kubectl"
setFlags(parse)
@ -152,7 +154,7 @@ func logFileName(dir string, logIdx int64) string {
klog.Warningf("Unable to add username %s to log filename hash: %v", user.Username, err)
}
}
for _, s := range os.Args {
for _, s := range pflag.Args() {
if _, err := h.Write([]byte(s)); err != nil {
klog.Warningf("Unable to add arg %s to log filename hash: %v", s, err)
}
@ -160,10 +162,10 @@ func logFileName(dir string, logIdx int64) string {
hs := hex.EncodeToString(h.Sum(nil))
var logfilePath string
// check if subcommand specified
if len(os.Args) < 2 {
if len(pflag.Args()) < 1 {
logfilePath = filepath.Join(dir, fmt.Sprintf("minikube_%s_%d.log", hs, logIdx))
} else {
logfilePath = filepath.Join(dir, fmt.Sprintf("minikube_%s_%s_%d.log", os.Args[1], hs, logIdx))
logfilePath = filepath.Join(dir, fmt.Sprintf("minikube_%s_%s_%d.log", pflag.Arg(0), hs, logIdx))
}
// if log has reached max size 1M, generate new logfile name by incrementing count
if checkLogFileMaxSize(logfilePath, 1024) {

138
deploy/addons/assets.go Normal file
View File

@ -0,0 +1,138 @@
/*
Copyright 2021 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package addons
import "embed"
var (
// AutoPauseAssets assets for auto-pause addon
//go:embed auto-pause/*.tmpl
//go:embed auto-pause/unpause.lua
AutoPauseAssets embed.FS
// DashboardAssets assets for dashboard addon
//go:embed dashboard/*.yaml dashboard/*.tmpl
DashboardAssets embed.FS
// DefaultStorageClassAssets assets for default-storageclass addon
//go:embed storageclass/storageclass.yaml.tmpl
DefaultStorageClassAssets embed.FS
// PodSecurityPolicyAssets assets for pod-security-policy addon
//go:embed pod-security-policy/pod-security-policy.yaml.tmpl
PodSecurityPolicyAssets embed.FS
// StorageProvisionerAssets assets for storage-provisioner addon
//go:embed storage-provisioner/storage-provisioner.yaml.tmpl
StorageProvisionerAssets embed.FS
// StorageProvisionerGlusterAssets assets for storage-provisioner-gluster addon
//go:embed storage-provisioner-gluster/*.tmpl
StorageProvisionerGlusterAssets embed.FS
// EfkAssets assets for efk addon
//go:embed efk/*.tmpl
EfkAssets embed.FS
// IngressAssets assets for ingress addon
//go:embed ingress/*.tmpl
IngressAssets embed.FS
// IstioProvisionerAssets assets for istio-provisioner addon
//go:embed istio-provisioner/istio-operator.yaml.tmpl
IstioProvisionerAssets embed.FS
// IstioAssets assets for istio addon
//go:embed istio/istio-default-profile.yaml.tmpl
IstioAssets embed.FS
// KubevirtAssets assets for kubevirt addon
//go:embed kubevirt/pod.yaml.tmpl
KubevirtAssets embed.FS
// MetricsServerAssets assets for metrics-server addon
//go:embed metrics-server/*.tmpl
MetricsServerAssets embed.FS
// OlmAssets assets for olm addon
//go:embed olm/*.tmpl
OlmAssets embed.FS
// RegistryAssets assets for registry addon
//go:embed registry/*.tmpl
RegistryAssets embed.FS
// RegistryCredsAssets assets for registry-creds addon
//go:embed registry-creds/registry-creds-rc.yaml.tmpl
RegistryCredsAssets embed.FS
// RegistryAliasesAssets assets for registry-aliases addon
//go:embed registry-aliases/*.tmpl
RegistryAliasesAssets embed.FS
// FreshpodAssets assets for freshpod addon
//go:embed freshpod/freshpod-rc.yaml.tmpl
FreshpodAssets embed.FS
// NvidiaDriverInstallerAssets assets for nvidia-driver-installer addon
//go:embed gpu/nvidia-driver-installer.yaml.tmpl
NvidiaDriverInstallerAssets embed.FS
// NvidiaGpuDevicePluginAssets assets for nvidia-gpu-device-plugin addon
//go:embed gpu/nvidia-gpu-device-plugin.yaml.tmpl
NvidiaGpuDevicePluginAssets embed.FS
// LogviewerAssets assets for logviewer addon
//go:embed logviewer/*.tmpl
LogviewerAssets embed.FS
// GvisorAssets assets for gvisor addon
//go:embed gvisor/*.tmpl gvisor/*.toml
GvisorAssets embed.FS
// HelmTillerAssets assets for helm-tiller addon
//go:embed helm-tiller/*.tmpl
HelmTillerAssets embed.FS
// IngressDNSAssets assets for ingress-dns addon
//go:embed ingress-dns/ingress-dns-pod.yaml.tmpl
IngressDNSAssets embed.FS
// MetallbAssets assets for metallb addon
//go:embed metallb/*.tmpl
MetallbAssets embed.FS
// AmbassadorAssets assets for ambassador addon
//go:embed ambassador/*.tmpl
AmbassadorAssets embed.FS
// GcpAuthAssets assets for gcp-auth addon
//go:embed gcp-auth/*.tmpl
GcpAuthAssets embed.FS
// VolumeSnapshotsAssets assets for volumesnapshots addon
//go:embed volumesnapshots/*.tmpl
VolumeSnapshotsAssets embed.FS
// CsiHostpathDriverAssets assets for csi-hostpath-driver addon
//go:embed csi-hostpath-driver/deploy/*.tmpl csi-hostpath-driver/rbac/*.tmpl
CsiHostpathDriverAssets embed.FS
// PortainerAssets assets for portainer addon
//go:embed portainer/portainer.yaml.tmpl
PortainerAssets embed.FS
)

View File

@ -3,7 +3,7 @@ Description=Auto Pause Service
[Service]
Type=simple
ExecStart=/bin/auto-pause
ExecStart=/bin/auto-pause --container-runtime={{.ContainerRuntime}}
Restart=always
[Install]

View File

@ -28,10 +28,10 @@ backend k8s-api-https
#tcp-request inspect-delay 10s
#tcp-request content lua.foo_action
tcp-request inspect-delay 10s
tcp-request content lua.unpause 192.168.49.2 8080
tcp-request content lua.unpause {{.NetworkInfo.ControlPlaneNodeIP}} 8080
tcp-request content reject if { var(req.blocked) -m bool }
option tcplog
option tcp-check
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
server k8s-api-1 192.168.49.2:8443 check
server k8s-api-1 {{.NetworkInfo.ControlPlaneNodeIP}}:{{.NetworkInfo.ControlPlaneNodePort}} check

View File

@ -18,7 +18,7 @@ metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/minikube-addons: dashboard
addonmanager.kubernetes.io/mode: Reconcile
addonmanager.kubernetes.io/mode: Reconcile
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
@ -31,7 +31,7 @@ metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/minikube-addons: dashboard
addonmanager.kubernetes.io/mode: Reconcile
addonmanager.kubernetes.io/mode: Reconcile
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
@ -46,7 +46,7 @@ metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/minikube-addons: dashboard
addonmanager.kubernetes.io/mode: Reconcile
addonmanager.kubernetes.io/mode: Reconcile
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque

View File

@ -164,7 +164,7 @@ webhooks:
namespace: gcp-auth
path: "/mutate"
rules:
- operations: ["CREATE", "UPDATE"]
- operations: ["CREATE"]
apiGroups: ["*"]
apiVersions: ["*"]
resources: ["pods"]

View File

@ -1,4 +1,4 @@
apiVersion: apiregistration.k8s.io/v1beta1
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
name: v1beta1.metrics.k8s.io

View File

@ -0,0 +1,143 @@
---
# Source: portainer/templates/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: portainer
---
# Source: portainer/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: portainer-sa-clusteradmin
namespace: portainer
labels:
app.kubernetes.io/name: portainer
app.kubernetes.io/instance: portainer
app.kubernetes.io/version: "ce-latest-ee-2.4.0"
---
# Source: portainer/templates/pvc.yaml
kind: "PersistentVolumeClaim"
apiVersion: "v1"
metadata:
name: portainer
namespace: portainer
annotations:
volume.alpha.kubernetes.io/storage-class: "generic"
labels:
io.portainer.kubernetes.application.stack: portainer
app.kubernetes.io/name: portainer
app.kubernetes.io/instance: portainer
app.kubernetes.io/version: "ce-latest-ee-2.4.0"
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
---
# Source: portainer/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: portainer
labels:
app.kubernetes.io/name: portainer
app.kubernetes.io/instance: portainer
app.kubernetes.io/version: "ce-latest-ee-2.4.0"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
namespace: portainer
name: portainer-sa-clusteradmin
---
# Source: portainer/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: portainer
namespace: portainer
labels:
io.portainer.kubernetes.application.stack: portainer
app.kubernetes.io/name: portainer
app.kubernetes.io/instance: portainer
app.kubernetes.io/version: "ce-latest-ee-2.4.0"
kubernetes.io/minikube-addons-endpoint: portainer
spec:
type: NodePort
ports:
- port: 9000
targetPort: 9000
protocol: TCP
name: http
nodePort: 30777
- port: 30776
targetPort: 30776
protocol: TCP
name: edge
nodePort: 30776
selector:
app.kubernetes.io/name: portainer
app.kubernetes.io/instance: portainer
---
# Source: portainer/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: portainer
namespace: portainer
labels:
io.portainer.kubernetes.application.stack: portainer
app.kubernetes.io/name: portainer
app.kubernetes.io/instance: portainer
app.kubernetes.io/version: "ce-latest-ee-2.4.0"
spec:
replicas: 1
strategy:
type: "Recreate"
selector:
matchLabels:
app.kubernetes.io/name: portainer
app.kubernetes.io/instance: portainer
template:
metadata:
labels:
app.kubernetes.io/name: portainer
app.kubernetes.io/instance: portainer
spec:
nodeSelector:
{}
serviceAccountName: portainer-sa-clusteradmin
volumes:
- name: "data"
persistentVolumeClaim:
claimName: portainer
containers:
- name: portainer
image: "portainer/portainer-ce:latest"
imagePullPolicy: Always
args: [ '--tunnel-port','30776' ]
volumeMounts:
- name: data
mountPath: /data
ports:
- name: http
containerPort: 9000
protocol: TCP
- name: tcp-edge
containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
path: /
port: 9000
readinessProbe:
httpGet:
path: /
port: 9000
resources:
{}

View File

@ -9,9 +9,15 @@ MAGIC="boot2docker, please format-me"
# If there is a partition with `boot2docker-data` as its label, use it and be
# very happy. Thus, you can come along if you feel like a room without a roof.
BOOT2DOCKER_DATA=`blkid -o device -l -t LABEL=$LABEL`
UNPARTITIONED_HD="/dev/$(lsblk | grep disk | cut -f1 -d' ')"
DISKS="$(lsblk | grep disk | cut -f1 -d' ')"
echo $BOOT2DOCKER_DATA
if [ ! -n "$BOOT2DOCKER_DATA" ]; then
for DISK in $DISKS; do
if [ -n "$BOOT2DOCKER_DATA" ]; then
# The primary minikube disk has been found or provisioned; stop provisioning other disks
break
fi
UNPARTITIONED_HD="/dev/$DISK"
echo "Is the disk unpartitioned?, test for the 'boot2docker format-me' string"
# Is the disk unpartitioned?, test for the 'boot2docker format-me' string
@ -61,10 +67,10 @@ if [ ! -n "$BOOT2DOCKER_DATA" ]; then
BOOT2DOCKER_DATA=`echo "${UNPARTITIONED_HD}1"`
mkfs.ext4 -i 2048 -L $LABEL $BOOT2DOCKER_DATA
else
echo "Disk unpartitioned but something is there... not doing anything"
echo "Disk ($UNPARTITIONED_HD) unpartitioned but something is there... not doing anything"
fi
else
echo "Partition table found on disk, not doing anything"
echo "Partition table found on disk ($UNPARTITIONED_HD), not doing anything"
fi
fi
else
@ -72,7 +78,7 @@ if [ ! -n "$BOOT2DOCKER_DATA" ]; then
# TODO: mount all Linux partitions and look for a /var/lib/docker...
BOOT2DOCKER_DATA=`blkid | grep -e 'TYPE="btrfs"' -e 'TYPE="ext4"' | head -n 1 | sed 's/:.*//'`
fi
fi
done
echo $BOOT2DOCKER_DATA

View File

@ -16,7 +16,7 @@ ExecStart=/usr/bin/containerd \
--root ${PERSISTENT_DIR}/var/lib/containerd
TasksMax=8192
Delegate=yes
KillMode=process
KillMode=mixed
LimitNOFILE=1048576
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.

View File

@ -17,6 +17,14 @@
# For systemd + docker configuration used below, see the following references:
# https://systemd.io/CONTAINER_INTERFACE/
# multi-stage docker build so we can build auto-pause for arm64
FROM golang:1.16
WORKDIR /src
# because the auto-pause binary depends on minikube's code we need to pass the whole source code as the context
ADD . .
RUN cd ./cmd/auto-pause/ && go build
# start from ubuntu 20.04, this image is reasonably small as a starting point
# for a kubernetes node image, it doesn't contain much we don't need
FROM ubuntu:focal-20210401
@ -24,12 +32,11 @@ FROM ubuntu:focal-20210401
ARG BUILDKIT_VERSION="v0.8.2"
# copy in static files (configs, scripts)
COPY 10-network-security.conf /etc/sysctl.d/10-network-security.conf
COPY 11-tcp-mtu-probing.conf /etc/sysctl.d/11-tcp-mtu-probing.conf
COPY clean-install /usr/local/bin/clean-install
COPY entrypoint /usr/local/bin/entrypoint
# must first run `make deploy/kicbase/auto-pause`
COPY auto-pause /bin/auto-pause
COPY deploy/kicbase/10-network-security.conf /etc/sysctl.d/10-network-security.conf
COPY deploy/kicbase/11-tcp-mtu-probing.conf /etc/sysctl.d/11-tcp-mtu-probing.conf
COPY deploy/kicbase/clean-install /usr/local/bin/clean-install
COPY deploy/kicbase/entrypoint /usr/local/bin/entrypoint
COPY --from=0 /src/cmd/auto-pause/auto-pause /bin/auto-pause
# Install dependencies, first from apt, then from release tarballs.
# NOTE: we use one RUN to minimize layers.
@ -63,6 +70,7 @@ RUN echo "Ensuring scripts are executable ..." \
libseccomp2 pigz \
bash ca-certificates curl rsync \
nfs-common \
iputils-ping netcat-openbsd vim-tiny \
&& find /lib/systemd/system/sysinit.target.wants/ -name "systemd-tmpfiles-setup.service" -delete \
&& rm -f /lib/systemd/system/multi-user.target.wants/* \
&& rm -f /etc/systemd/system/*.wants/* \
@ -151,14 +159,14 @@ RUN sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/lib
systemd-tmpfiles --create
# automount service
COPY automount/minikube-automount /usr/sbin/minikube-automount
COPY automount/minikube-automount.service /usr/lib/systemd/system/minikube-automount.service
COPY deploy/kicbase/automount/minikube-automount /usr/sbin/minikube-automount
COPY deploy/kicbase/automount/minikube-automount.service /usr/lib/systemd/system/minikube-automount.service
RUN ln -fs /usr/lib/systemd/system/minikube-automount.service \
/etc/systemd/system/multi-user.target.wants/minikube-automount.service
# scheduled stop service
COPY scheduled-stop/minikube-scheduled-stop /var/lib/minikube/scheduled-stop/minikube-scheduled-stop
COPY scheduled-stop/minikube-scheduled-stop.service /usr/lib/systemd/system/minikube-scheduled-stop.service
COPY deploy/kicbase/scheduled-stop/minikube-scheduled-stop /var/lib/minikube/scheduled-stop/minikube-scheduled-stop
COPY deploy/kicbase/scheduled-stop/minikube-scheduled-stop.service /usr/lib/systemd/system/minikube-scheduled-stop.service
RUN chmod +x /var/lib/minikube/scheduled-stop/minikube-scheduled-stop
# disable non-docker runtimes by default

View File

@ -1,5 +1,3 @@
// +build release
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
@ -47,13 +45,30 @@ func getSHAFromURL(url string) (string, error) {
return hex.EncodeToString(b[:]), nil
}
// TestReleasesJSON checks if all *GA* releases
// enlisted in https://storage.googleapis.com/minikube/releases.json
// are available to download and have correct hashsum
func TestReleasesJSON(t *testing.T) {
releases, err := notify.GetAllVersionsFromURL(notify.GithubMinikubeReleasesURL)
releases, err := notify.AllVersionsFromURL(notify.GithubMinikubeReleasesURL)
if err != nil {
t.Fatalf("Error getting releases.json: %v", err)
}
checkReleases(t, releases)
}
for _, r := range releases {
// TestBetaReleasesJSON checks if all *BETA* releases
// enlisted in https://storage.googleapis.com/minikube/releases-beta.json
// are available to download and have correct hashsum
func TestBetaReleasesJSON(t *testing.T) {
releases, err := notify.AllVersionsFromURL(notify.GithubMinikubeBetaReleasesURL)
if err != nil {
t.Fatalf("Error getting releases-bets.json: %v", err)
}
checkReleases(t, releases)
}
func checkReleases(t *testing.T, rs notify.Releases) {
for _, r := range rs {
fmt.Printf("Checking release: %s\n", r.Name)
for platform, sha := range r.Checksums {
fmt.Printf("Checking SHA for %s.\n", platform)

View File

@ -1,10 +1,26 @@
[
{
"name": "v1.22.0-beta.0",
"checksums": {
"darwin": "1ec06c37be5c6c79a7255da09ff83490a44d1e8cd2b2f45e4b489edfdeacde94",
"linux": "c9d9ac605a94748379188cced6b832037b8069441744b889214990c4ca3485a5",
"windows": "68fb9c24f0ea55b985856d0cce9fa0c288b8a4d7e13519d6f0790038165d7ef1"
}
},
{
"name": "v1.21.0-beta.0",
"checksums": {
"darwin": "69ab001eb4984d09ed731d5ac92afd8310e5c7672c2275b39d7a4c7e2dcfb4c6",
"linux": "41a26190c6774e1f3cc568986d4043431022d5dff4a619f131e9bb49d13e2874",
"windows": "e7d41c8c40e33633d47976047a48600ff23657c824db7e60fe5a4f2d2daeb135"
}
},
{
"name": "v1.20.0-beta.0",
"checksums": {
"darwin": "",
"linux": "",
"windows": ""
"darwin": "686f8d7c06c93f28543f982ec56a68544ab2ad6c7f70b39ede5174d7bac29651",
"linux": "fe0796852c9ef266597fc93fa4b7a88d2cab9ba7008f0e9f644b633c51d269a1",
"windows": "84a0686c90ab88d04a0aab57b8cadacf9197d3ea6b467f9f807d071efe7fad3c"
}
}
]

View File

@ -1,4 +1,20 @@
[
{
"name": "v1.22.0",
"checksums": {
"darwin": "932a278393cdcb90bff79c4e49d72c1c34910a71010f1466ce92f51d8332fb58",
"linux": "7579e5763a4e441500e5709eb058384c9cfe9c9dd888b39905b2cdf3d30fbf36",
"windows": "8764ca0e290b4420c5ec82371bcc1b542990a93bdf771578623554be32319d08"
}
},
{
"name": "v1.21.0",
"checksums": {
"darwin": "e2043883ca993b2a65396d379823dab6404dd842d0cc2a81348d247b01785070",
"linux": "5d423a00a24fdfbb95627a3fadbf58540fc4463be2338619257c529f93cf061b",
"windows": "74c961877798531ab8e53e2590bfae3cee7690d0c2e0614fdb44339e065124b5"
}
},
{
"name": "v1.20.0",
"checksums": {

View File

@ -0,0 +1,74 @@
# Public Flake Rate Charts
* First proposed: 2021-05-17
* Authors: Andriy Dzikh (@andriyDev)
## Reviewer Priorities
Please review this proposal with the following priorities:
* Does this fit with minikube's [principles](https://minikube.sigs.k8s.io/docs/concepts/principles/)?
* Are there other approaches to consider?
* Could the implementation be made simpler?
* Are there usability, reliability, or technical debt concerns?
## Summary
As of June 2021, public users have no way to view the flake rates of integration tests. This can make it tricky to determine whether an individual PR is causing a new error, or if the test failure is just a flake, or if the test is entirely broken. While each test failure should be investigated, sometimes an unrelated test fails, and knowing that the test has been flaky can increase confidence in a particular PR.
This proposal is for a system to inform users, both public and internal, of the flake rates of various tests on the master branch.
## Goals
* Comments on PRs describing the flake rates of failing tests
* Charts to visualize the flake rates of any test
## Design Details
### Overview
The full overview of the system is as follows:
* The `minikube` Jenkins job builds all binaries for integration tests. On completion, it triggers `minikube_set_pending.sh`, which updates the PR status of integration tests to pending. In addition, `minikube_set_pending.sh` will upload the list of environments to wait for to `gs://minikube-builds/logs/<MINIKUBE_LOCATION>/<minikube_BUILD_NUMBER>/started_environments.txt`
* Jenkins integration test jobs running on master generate gopogh summaries. Each job then triggers `Flake Rate Upload` which appends the completed environment to `gs://minikube-builds/logs/<MINIKUBE_LOCATION>/<minikube_BUILD_NUMBER>/finished_environments.txt`
* Once all started environments are present in finished environments, if running on master, all gopogh reports are processed through `upload_tests.sh` and appended into the dataset of all test runs at `gs://minikube-flake-rate/data.csv`. If running on a PR, the gopogh reports are used with `report_flakes.sh` to write a comment on PRs about the flake rates of all failed tests.
* A Jenkins job runs regularly to compute the flake rates of tests in `gs://minikube-flake-rate/data.csv` and outputs the results into `gs://minikube-flake-rate/flake_rates.csv`, including the environment (e.g. `Docker_Linux`), the test name, the flake rate as a percentage, and the average duration
* An HTML+JS file, hosted on `gs://minikube-flake-rate/flake_chart.html`, will read the full test data (`gs://minikube-flake-rate/data.csv`), and parse it into a chart displaying the daily flake rates and average durations of the requested tests (specified by url query arguments)
### Test Data Collection
Our system needs a way to collect data from our existing integration tests. As of June 2021, all integration Jenkins jobs run the integration tests, then use gopogh to create HTML files for viewing, and JSON files for summarizing the test results. The new system will then take these JSON summaries, and pass them into a script named `upload_tests.sh`. This script will process the summary into a CSV file of its test runs and related data, and upload this to a dataset of all test runs at `gs://minikube-flake-rate/data.csv`. This file will be publicly accessible to all users to read (and later chart the data).
### Flake Rate Computation
On a regular schedule (every 4 hours for example), a Jenkins job named `Flake Rate Computation` will download `gs://minikube-flake-rate/data.csv` and compute a failure percentage for each test/environment combination, based on the number of failures occurring in the past 15 days (this will be configurable). Note that this will be the past 15 dates that the test actually ran, since this can allow a test to be skipped for a long period of time and then unskipped while maintaining the old flake rate. This will also compute the average duration of the test for the past 15 days. The resulting data will then be stored in `gs://minikube-flake-rate/flake_rates.csv`.
### Charts
To allow users to see the daily "flakiness" of a test/environment combination, we will have an HTML file at `gs://minikube-flake-rate/flake_chart.html` and a JS file at `gs://minikube-flake-rate/flake_chart.js`. These will fetch `gs://minikube-flake-rate/data.csv` and parse it into Google Charts allowing us to visualize the "flakiness" over time. This can help track down exactly when a test became flaky by telling us the commits associated with each test date. The flake rate charts will use two query parameters (e.g. `https://storage.googleapis.com/minikube-flake-rate/flake_chart.html?test=TestFunctional/parallel/LogsCmd&env=Docker_Linux`): `test` which will control which test to view (`TestFunctional/parallel/LogsCmd`), and `env` which will control the environment to view (e.g. `Docker_Linux`). If `test` is omitted, a chart describing all tests for `env` will be displayed. By hosting this in a GCS bucket, we can avoid needing to create actual servers to manage this. Since these files are incredibly lightweight, there is little concern over the workload of hosting these files.
### PR Comments
As PRs can have many failures, it is useful to be told the flake rates of some of these tests. Some of our tests could be more stable, and knowing that a failed test is known to be unreliable can be informative for both the PR creators and the PR reviewers. To that end, once all integration tests have finished, it will call a script named `report_flakes.sh`. This script will use gopogh summaries of all environments (for the test run that should be reported about) and the public `gs://minikube-flake-rate/flake_rates.csv` to comment on the PR about all failed tests, their flake rates, and links to the flake charts for the test and the environment the failure occurred on.
### Additional Information
The raw data `gs://minikube-flake-rate/data.csv` can become quite large if stored as simple CSV data. Since this is a CSV file, it will contain columns for each field which includes commit hash, test date, test name, etc. Some of these fields can be repetitive like commit hash and test date. Since test runs are generally added such that all the tests for a single commit hash are added consecutively, we can use a sentinel value to repeat values. Specifically, if the previous row had the same value for the current column, we can replace the current column value with an empty space. When parsing the reverse can be performed - whenever a blank space is found, simply repeat the value of the previous row.
```
Input:
hash,2021-06-10,Docker_Linux,TestFunctional,Passed,0.5
hash,2021-06-10,Docker_Linux_containerd,TestFunctional,Failed,0.6
Output:
hash,2021-06-10,Docker_Linux,TestFunctional,Passed,0.5
,,Docker_Linux_containerd,,Failed,0.6
```
This optimization will be done in `optimize_data.sh`.
## Alternatives Considered
Another optimization technique that can be used on `gs://minikube-flake-rate/data.csv` is to use a string table. The string table would be stored at `gs://minikube-flake-rate/data_strings.txt` and would contain an ordered list of unique strings. The index of each string can then be used in place of the actual text in `gs://minikube-flake-rate/data.csv`. The index into the string table will very likely be shorter than the text it represents, saving space. For non-consecutive strings, this can be a very big saving. For example, test names are repeated very often in `gs://minikube-flake-rate/data.csv`, but almost never consecutively. With this technique, the dataset can be compressed even further.
The trouble with this technique is complexity - any users of the dataset would need to also manage the string table. More importantly, if a new string needs to be added to the string table, the order is critical, meaning synchronization can be a problem (since our integration tests run in parallel). Due to these concerns, this option was rejected.

View File

@ -0,0 +1,205 @@
# Periodically tell user about minikube features/tips and tricks
* First proposed: 2021-06-18
* Authors: Peixuan Ding (@dinever)
## Reviewer Priorities
Please review this proposal with the following priorities:
* Does this fit with minikube's [principles](https://minikube.sigs.k8s.io/docs/concepts/principles/)?
* Are there other approaches to consider?
* Could the implementation be made simpler?
* Are there usability, reliability, or technical debt concerns?
Please leave the above text in your proposal as instructions to the reader.
## Summary
minikube has lots of great features. We want to proactively remind users that
those features are available.
To achieve this, we can have a tips feature that randomly shows a tip
from a curated list whenever the user starts a new minikube profile.
For example:
![Screenshot from 2021-06-18 00-58-02](https://user-images.githubusercontent.com/1311594/122508665-53bd6380-cfd0-11eb-9e99-a6c5935514d5.png)
## Goals
* Store a list of tips in a static file
* Show a random minikube usage tip each time a user starts a minikube profile
* Have the tips synced to the Hugo docs website to make those available through docs
* Allow user to disable the Tips feature with minikube config
## Non-Goals
* Modify any existing functionalities or docs
## Design Details
First, we need a static file to store all the tips, we can have a YAML file at [pkg/generate/tips/tips.yaml](https://github.com/kubernetes/minikube/tree/master/pkg/generate):
```YAML
tips:
- |
You can specify any Kubernetes version you want. For example:
```
minikube start --kubernetes-version=v1.19.0
```
- |
You can use minikube's built-in kubectl. For example:
```
minikube kubectl -- get pods
```
- |
minikube has the built-in Kubernetes Dashboard UI. To access it:
```
minikube dashboard
```
```
Use the `go:embed` directive to embed this file into the minikube binary.
The current `out.Boxed` has a hard-coded style (red). I propose to add another `out.BoxedWithConfig` method to allow
output with customized style:
```go
// BoxedWithConfig writes a templated message in a box with customized style config to stdout
func BoxedWithConfig(cfg box.Config, st style.Enum, title string, format string, a ...V) {
}
```
Whenever minikube successfully starts, we randomly choose a tip.
Before printing it out, we need to do some regex replacement to strip the markdown syntax
for a better viewing experience in the terminal:
From this:
``````markdown
You can specify any Kubernetes version you want. For example:
```
minikube start --kubernetes-version=v1.19.0
```
``````
To this:
```markdown
You can specify any Kubernetes version you want. For example:
minikube start --kubernetes-version=v1.19.0
```
Then we can print out the tip:
```go
boxCfg := out.BoxConfig{
Config: box.Config{
Py: 1,
Px: 5,
TitlePos: "Top",
Type: "Round",
Color: tipBoxColor,
},
Title: tipTitle,
Icon: style.Tip,
}
out.BoxedWithConfig(boxCfg, tips.Tips[chosen] + "\n\n" + tipSuffix)
```
![Screenshot from 2021-06-18 00-58-02](https://user-images.githubusercontent.com/1311594/122508665-53bd6380-cfd0-11eb-9e99-a6c5935514d5.png)
Users can choose to disable this feature through `minikube config set disable-tips true`
We will have `make generate-docs` generating the docs site based on this YAML file as well.
We can have a `Nice to know` sub-page under `FAQ`?
![Screenshot from 2021-06-18 01-00-30](https://user-images.githubusercontent.com/1311594/122508827-a139d080-cfd0-11eb-98bb-f7c3c1c604c2.png)
### About the tip collection
I plan to start with the command lines and cover almost all CLI usages of minikube.
That includes, but is not limited to:
- addons
- cached images
- command line completion
- config
- file copy
- dashboard
- delete minikube cluster
- configure minikube's docker/podman env
- image build / load / ls / rm
- ip
- logging
- kubectl
- mount file directory
- multi-node
- pause/unpause to save resource
- multi-profile
- surface URL to a k8s service
- ssh into minikube
- status
- tunnel to connect to LB
- update-check to check versions
- update-context
### Implementation
I plan to open at least 4 PRs:
1. `out.Boxed` with custom style
2. random `tips` display with ability to disable through config, with an initial set of about 10 tips
3. `make generate-docs` to sync tips to docs
4. Add more tips
## Alternatives Considered
1. Is there a more preferred file format to YAML?
2. Maybe we just want to sync the tips to the `FAQ` page list instead of creating a new page?
3. Instead of the file format I proposed, maybe add a `question` field?
```yaml
tips:
- question: How to specify a different Kubernetes version?
answer: |
You can specify any Kubernetes version you want. For example:
```
minikube start --kubernetes-version=v1.19.0
```
- question: Do I have to install `kubectl` myself?
answer: |
You can use minikube's built-in kubectl. For example:
```
minikube kubectl -- get pods
```
- question: How do I access the Kubernetes Dashboard UI?
answer: |
minikube has the built-in Kubernetes Dashboard UI. To access it:
```
minikube dashboard
```
```
On the docs side we can show both questions and answers. On the CLI side
we can either show both questions and answers, or just show the answers
to make it more compact.
![Screenshot from 2021-06-18 01-25-54](https://user-images.githubusercontent.com/1311594/122510785-2c689580-cfd4-11eb-9fd0-0a0ff344e3cc.png)

119
go.mod
View File

@ -5,35 +5,32 @@ go 1.16
require (
cloud.google.com/go/storage v1.15.0
contrib.go.opencensus.io/exporter/stackdriver v0.12.1
github.com/Azure/azure-sdk-for-go v43.3.0+incompatible
github.com/Delta456/box-cli-maker/v2 v2.2.1
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v0.16.0
github.com/Microsoft/hcsshim v0.8.15 // indirect
github.com/Microsoft/hcsshim v0.8.17 // indirect
github.com/Parallels/docker-machine-parallels/v2 v2.0.1
github.com/VividCortex/godaemon v0.0.0-20201030160542-15e3f4925a21
github.com/blang/semver v3.5.1+incompatible
github.com/VividCortex/godaemon v1.0.0
github.com/blang/semver/v4 v4.0.0
github.com/briandowns/spinner v1.11.1
github.com/c4milo/gotoolkit v0.0.0-20170318115440-bcc06269efa9 // indirect
github.com/cenkalti/backoff v2.2.1+incompatible
github.com/cenkalti/backoff/v4 v4.1.0
github.com/c4milo/gotoolkit v0.0.0-20190525173301-67483a18c17a // indirect
github.com/cenkalti/backoff/v4 v4.1.1
github.com/cheggaaa/pb/v3 v3.0.8
github.com/cloudevents/sdk-go/v2 v2.3.1
github.com/cloudfoundry-attic/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21
github.com/cloudfoundry/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21 // indirect
github.com/docker/cli v0.0.0-20200303162255-7d407207c304 // indirect
github.com/docker/docker v17.12.0-ce-rc1.0.20210128214336-420b1d36250f+incompatible
github.com/docker/docker v20.10.7+incompatible
github.com/docker/go-units v0.4.0
github.com/docker/machine v0.16.2
github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f
github.com/elazarl/goproxy/ext v0.0.0-20190421051319-9d40249d3c2f // indirect
github.com/elazarl/goproxy v0.0.0-20210110162100-a92cc753f88e
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3
github.com/google/go-cmp v0.5.5
github.com/google/go-cmp v0.5.6
github.com/google/go-containerregistry v0.4.1
github.com/google/go-github v17.0.0+incompatible
github.com/google/go-github/v32 v32.1.0
github.com/google/slowjam v0.0.0-20200530021616-df27e642fe7b
github.com/google/uuid v1.2.0
github.com/hashicorp/go-getter v1.5.2
github.com/google/go-github/v36 v36.0.0
github.com/google/slowjam v1.0.0
github.com/google/uuid v1.3.0
github.com/gookit/color v1.4.2 // indirect
github.com/hashicorp/go-getter v1.5.5
github.com/hashicorp/go-retryablehttp v0.7.0
github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect
github.com/hooklift/assert v0.0.0-20170704181755-9d1defd6d214 // indirect
@ -52,14 +49,16 @@ require (
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
github.com/klauspost/cpuid v1.2.0
github.com/libvirt/libvirt-go v3.9.0+incompatible
github.com/machine-drivers/docker-machine-driver-vmware v0.1.1
github.com/machine-drivers/docker-machine-driver-vmware v0.1.3
github.com/mattbaird/jsonpatch v0.0.0-20200820163806-098863c1fc24
github.com/mattn/go-isatty v0.0.12
github.com/mattn/go-isatty v0.0.13
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/mitchellh/go-ps v1.0.0
github.com/moby/hyperkit v0.0.0-20210108224842-2f061e447e14
github.com/moby/sys/mount v0.2.0 // indirect
github.com/olekukonko/tablewriter v0.0.5
github.com/opencontainers/go-digest v1.0.0
github.com/otiai10/copy v1.5.1
github.com/otiai10/copy v1.6.0
github.com/pborman/uuid v1.2.1
github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2
github.com/pkg/browser v0.0.0-20160118053552-9302be274faa
@ -68,10 +67,10 @@ require (
github.com/pmezard/go-difflib v1.0.0
github.com/russross/blackfriday v1.5.3-0.20200218234912-41c5fccfd6f6 // indirect
github.com/samalba/dockerclient v0.0.0-20160414174713-91d7393ff859 // indirect
github.com/shirou/gopsutil/v3 v3.21.4
github.com/spf13/cobra v1.1.3
github.com/shirou/gopsutil/v3 v3.21.6
github.com/spf13/cobra v1.2.1
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.7.1
github.com/spf13/viper v1.8.1
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f
github.com/zchee/go-vmnet v0.0.0-20161021174912-97ebf9174097
go.opencensus.io v0.23.0
@ -79,59 +78,55 @@ require (
go.opentelemetry.io/otel/sdk v0.16.0
go.opentelemetry.io/otel/trace v0.17.0
golang.org/x/build v0.0.0-20190927031335-2835ba2e683f
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
golang.org/x/exp v0.0.0-20210220032938-85be41e4509f
golang.org/x/mod v0.4.2
golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
golang.org/x/term v0.0.0-20210406210042-72f3dc4e9b72
golang.org/x/text v0.3.6
gonum.org/v1/plot v0.9.0
google.golang.org/api v0.45.0
google.golang.org/api v0.51.0
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 // indirect
gopkg.in/yaml.v2 v2.4.0
gotest.tools/v3 v3.0.3 // indirect
k8s.io/api v0.20.5
k8s.io/apimachinery v0.20.5
k8s.io/client-go v0.20.5
k8s.io/klog/v2 v2.8.0
k8s.io/kubectl v0.0.0
k8s.io/kubernetes v1.20.5
k8s.io/api v0.21.3
k8s.io/apimachinery v0.21.3
k8s.io/client-go v0.21.3
k8s.io/klog/v2 v2.10.0
k8s.io/kubectl v0.21.3
k8s.io/kubernetes v1.21.3
sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.3.0
)
replace (
git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110319-2566ecd5d999
github.com/briandowns/spinner => github.com/alonyb/spinner v1.12.7
github.com/docker/docker => github.com/afbjorklund/moby v0.0.0-20210308214533-2fa72faf0e8b
github.com/docker/machine => github.com/machine-drivers/machine v0.7.1-0.20210306082426-fcb2ad5bcb17
github.com/docker/machine => github.com/machine-drivers/machine v0.7.1-0.20210719174735-6eca26732baa
github.com/google/go-containerregistry => github.com/afbjorklund/go-containerregistry v0.4.1-0.20210321165649-761f6f9626b1
github.com/samalba/dockerclient => github.com/sayboras/dockerclient v1.0.0
k8s.io/api => k8s.io/api v0.20.5
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.5
k8s.io/apimachinery => k8s.io/apimachinery v0.20.5
k8s.io/apiserver => k8s.io/apiserver v0.20.5
k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.5
k8s.io/client-go => k8s.io/client-go v0.20.5
k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.5
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.5
k8s.io/code-generator => k8s.io/code-generator v0.20.5
k8s.io/component-base => k8s.io/component-base v0.20.5
k8s.io/component-helpers => k8s.io/component-helpers v0.20.5
k8s.io/controller-manager => k8s.io/controller-manager v0.20.5
k8s.io/cri-api => k8s.io/cri-api v0.20.5
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.5
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.5
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.5
k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.5
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.5
k8s.io/kubectl => k8s.io/kubectl v0.20.5
k8s.io/kubelet => k8s.io/kubelet v0.20.5
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.5
k8s.io/metrics => k8s.io/metrics v0.20.5
k8s.io/mount-utils => k8s.io/mount-utils v0.20.5
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.5
k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.20.5
k8s.io/sample-controller => k8s.io/sample-controller v0.20.5
k8s.io/api => k8s.io/api v0.21.2
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.21.2
k8s.io/apimachinery => k8s.io/apimachinery v0.21.2
k8s.io/apiserver => k8s.io/apiserver v0.21.2
k8s.io/cli-runtime => k8s.io/cli-runtime v0.21.2
k8s.io/client-go => k8s.io/client-go v0.21.2
k8s.io/cloud-provider => k8s.io/cloud-provider v0.21.2
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.21.2
k8s.io/code-generator => k8s.io/code-generator v0.21.2
k8s.io/component-base => k8s.io/component-base v0.21.2
k8s.io/component-helpers => k8s.io/component-helpers v0.21.2
k8s.io/controller-manager => k8s.io/controller-manager v0.21.2
k8s.io/cri-api => k8s.io/cri-api v0.21.2
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.21.2
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.21.2
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.21.2
k8s.io/kube-proxy => k8s.io/kube-proxy v0.21.2
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.21.2
k8s.io/kubectl => k8s.io/kubectl v0.21.2
k8s.io/kubelet => k8s.io/kubelet v0.21.2
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.21.2
k8s.io/metrics => k8s.io/metrics v0.21.2
k8s.io/mount-utils => k8s.io/mount-utils v0.21.2
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.21.2
)

485
go.sum

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,240 @@
/*
Copyright 2021 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"encoding/csv"
"flag"
"fmt"
"io"
"log"
"os"
"strconv"
"gonum.org/v1/plot"
"gonum.org/v1/plot/plotter"
"gonum.org/v1/plot/plotutil"
"gonum.org/v1/plot/vg"
)
// run holds the timing results of a single benchmark run, one field per
// measured step of the time-to-k8s benchmark (values in seconds, as
// plotted on the chart's "time (seconds)" axis).
type run struct {
	cmd    float64 // time for the start command to finish executing
	api    float64 // time until the API server is answering
	k8s    float64 // time until the Kubernetes service is available
	dnsSvc float64 // time until the DNS service is available
	app    float64 // time until the deployed app is running
	dnsAns float64 // time until DNS answers queries
}

// runs groups all benchmark runs recorded for a single app
// (minikube, kind, or k3d) together with that app's version string.
type runs struct {
	version string
	runs    []run
}
// main parses the -csv and -output flags, reads the benchmark CSV into a
// per-app map, and renders the stacked bar chart to the output path.
// Any failure is fatal.
func main() {
	var (
		inputPath  = flag.String("csv", "", "path to the CSV file")
		outputPath = flag.String("output", "", "path to output the chart to")
	)
	flag.Parse()

	// benchmark runs keyed by app name (minikube, kind, k3d)
	results := make(map[string]runs)
	err := readInCSV(*inputPath, results)
	if err != nil {
		log.Fatal(err)
	}

	vals, totals, names := values(results)
	if err = createChart(*outputPath, vals, totals, names); err != nil {
		log.Fatal(err)
	}
}
// readInCSV reads benchmark results from the CSV file at csvPath and
// groups them into apps, keyed by app name (column 0).
//
// Each data row is expected to carry the app version in column 5 and the
// six step durations in columns 8-13 (cmd, api, k8s, dnsSvc, app, dnsAns).
// The header line (column 0 == "name") is skipped.
func readInCSV(csvPath string, apps map[string]runs) error {
	f, err := os.Open(csvPath)
	if err != nil {
		return err
	}
	// close the file once parsing is done (previously leaked)
	defer f.Close()
	r := csv.NewReader(f)
	for {
		d, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		// skip the header line of the CSV file
		if d[0] == "name" {
			continue
		}
		// guard against short rows so we return a clear error
		// instead of panicking on the index accesses below
		if len(d) < 14 {
			return fmt.Errorf("row for %q has %d columns, want at least 14", d[0], len(d))
		}
		values := []float64{}
		// columns 8-13 contain the run results
		for i := 8; i <= 13; i++ {
			v, err := strconv.ParseFloat(d[i], 64)
			if err != nil {
				return err
			}
			values = append(values, v)
		}
		newRun := run{values[0], values[1], values[2], values[3], values[4], values[5]}
		// get the app from the map and add the new run to it
		name := d[0]
		k, ok := apps[name]
		if !ok {
			k = runs{version: d[5]}
		}
		k.runs = append(k.runs, newRun)
		apps[name] = k
	}
	return nil
}
// values computes, for each app, the average duration of every benchmark
// step across that app's runs. It returns:
//   - one plotting series per step (cmd, api, k8s, dnsSvc, app, dnsAns),
//     each holding one averaged value per app
//   - the per-app totals (sum of the six step averages)
//   - the per-app version strings, used as bar labels
//
// Apps are processed in the fixed order minikube, kind, k3d so the series
// line up with the chart's X axis.
func values(apps map[string]runs) ([]plotter.Values, []float64, []string) {
	var cmdValues, apiValues, k8sValues, dnsSvcValues, appValues, dnsAnsValues plotter.Values
	names := []string{}
	totals := []float64{}
	// for each app, calculate the average for all the runs, and append them to the charting values
	for _, name := range []string{"minikube", "kind", "k3d"} {
		app := apps[name]
		var cmd, api, k8s, dnsSvc, appRun, dnsAns float64
		names = append(names, app.version)
		for _, l := range app.runs {
			cmd += l.cmd
			api += l.api
			k8s += l.k8s
			dnsSvc += l.dnsSvc
			appRun += l.app
			dnsAns += l.dnsAns
		}
		var cmdAvg, apiAvg, k8sAvg, dnsSvcAvg, appAvg, dnsAnsAvg float64
		// guard against an app with no recorded runs: dividing by zero
		// previously produced NaN values and broke the chart; missing
		// apps now chart as zero-height bars instead
		if c := float64(len(app.runs)); c > 0 {
			cmdAvg = cmd / c
			apiAvg = api / c
			k8sAvg = k8s / c
			dnsSvcAvg = dnsSvc / c
			appAvg = appRun / c
			dnsAnsAvg = dnsAns / c
		}
		cmdValues = append(cmdValues, cmdAvg)
		apiValues = append(apiValues, apiAvg)
		k8sValues = append(k8sValues, k8sAvg)
		dnsSvcValues = append(dnsSvcValues, dnsSvcAvg)
		appValues = append(appValues, appAvg)
		dnsAnsValues = append(dnsAnsValues, dnsAnsAvg)
		total := cmdAvg + apiAvg + k8sAvg + dnsSvcAvg + appAvg + dnsAnsAvg
		totals = append(totals, total)
	}
	values := []plotter.Values{cmdValues, apiValues, k8sValues, dnsSvcValues, appValues, dnsAnsValues}
	return values, totals, names
}
// createChart renders a stacked bar chart of the per-step averages and
// saves it as a 12x8 inch image at chartPath.
//
// values holds one plotting series per benchmark step (in the order
// produced by values()), totals holds the per-app total times drawn as
// labels above the bars, and names holds the per-app version strings
// used on the X axis.
func createChart(chartPath string, values []plotter.Values, totals []float64, names []string) error {
	p := plot.New()
	p.Title.Text = "Time to go from 0 to successful Kubernetes deployment"
	p.Y.Label.Text = "time (seconds)"
	bars := []*plotter.BarChart{}
	// create bars for all the values
	for i, v := range values {
		bar, err := createBars(v, i)
		if err != nil {
			return err
		}
		bars = append(bars, bar)
		p.Add(bar)
	}
	// stack the bars (assumes exactly six series, matching values())
	bars[0].StackOn(bars[1])
	bars[1].StackOn(bars[2])
	bars[2].StackOn(bars[3])
	bars[3].StackOn(bars[4])
	bars[4].StackOn(bars[5])
	// max Y value of the chart
	p.Y.Max = 80
	// add all the bars to the legend, in the same order as the series
	legends := []string{"Command Exec", "API Server Answering", "Kubernetes SVC", "DNS SVC", "App Running", "DNS Answering"}
	for i, bar := range bars {
		p.Legend.Add(legends[i], bar)
	}
	p.Legend.Top = true
	// add app name to the bars
	p.NominalX(names...)
	// create total time labels, formatted to two decimal places
	var labels []string
	for _, total := range totals {
		label := fmt.Sprintf("%.2f", total)
		labels = append(labels, label)
	}
	// create label positions: nudge each label slightly left of the bar
	// center and just above the bar top
	var labelPositions []plotter.XY
	for i := range totals {
		x := float64(i) - 0.03
		y := totals[i] + 0.3
		labelPosition := plotter.XY{X: x, Y: y}
		labelPositions = append(labelPositions, labelPosition)
	}
	l, err := plotter.NewLabels(plotter.XYLabels{
		XYs:    labelPositions,
		Labels: labels,
	},
	)
	if err != nil {
		return err
	}
	p.Add(l)
	if err := p.Save(12*vg.Inch, 8*vg.Inch, chartPath); err != nil {
		return err
	}
	return nil
}
// createBars builds one bar-chart series for a benchmark step: 20-point
// bars drawn 80 points wide, with no outline and a palette color chosen
// by the series index.
func createBars(values plotter.Values, index int) (*plotter.BarChart, error) {
	chart, err := plotter.NewBarChart(values, vg.Points(20))
	if err != nil {
		return nil, err
	}
	chart.Color = plotutil.Color(index)
	chart.Width = vg.Length(80)
	chart.LineStyle.Width = vg.Length(0)
	return chart, nil
}

View File

@ -0,0 +1,4 @@
# time-to-k8s benchmark test case: minikube with the containerd runtime.
testcases:
  minikube:
    # start a fresh cluster using containerd with all available memory and CPUs
    setup: minikube start --container-runtime=containerd --memory=max --cpus=max
    # remove the cluster after the benchmark run
    teardown: minikube delete

View File

@ -0,0 +1,4 @@
# time-to-k8s benchmark test case: minikube with the docker runtime.
testcases:
  minikube:
    # start a fresh cluster using docker with all available memory and CPUs
    setup: minikube start --container-runtime=docker --memory=max --cpus=max
    # remove the cluster after the benchmark run
    teardown: minikube delete

View File

@ -0,0 +1,219 @@
/*
Copyright 2021 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"encoding/csv"
"encoding/json"
"flag"
"image/color"
"io"
"log"
"os"
"strconv"
"time"
"gonum.org/v1/plot"
"gonum.org/v1/plot/plotter"
"gonum.org/v1/plot/vg"
"gonum.org/v1/plot/vg/draw"
)
// benchmark contains the duration of the benchmark steps for one
// benchmark session (each value averaged across the session's CSV rows,
// in seconds, matching the chart's "time (seconds)" axis).
type benchmark struct {
	Date   time.Time `json:"date"`   // when the benchmark result was recorded
	Cmd    float64   `json:"cmd"`    // time for the start command to finish executing
	API    float64   `json:"api"`    // time until the API server is answering
	K8s    float64   `json:"k8s"`    // time until the Kubernetes service is available
	DNSSvc float64   `json:"dnsSvc"` // time until the DNS service is available
	App    float64   `json:"app"`    // time until the deployed app is running
	DNSAns float64   `json:"dnsAns"` // time until DNS answers queries
	Total  float64   `json:"total"`  // sum of all step durations
}

// benchmarks contains a list of benchmarks, used for storing benchmark results to JSON
type benchmarks struct {
	Benchmarks []benchmark `json:"benchmarks"`
}
func main() {
latestBenchmarkPath := flag.String("csv", "", "path to the CSV file containing the latest benchmark result")
chartOutputPath := flag.String("output", "", "path to output the chart to")
pastBenchmarksPath := flag.String("past-runs", "", "path to the JSON file containing the past benchmark results")
flag.Parse()
latestBenchmark := readInLatestBenchmark(*latestBenchmarkPath)
pastBenchmarks := readInPastBenchmarks(*pastBenchmarksPath)
pastBenchmarks.Benchmarks = append(pastBenchmarks.Benchmarks, latestBenchmark)
updateRunsFile(pastBenchmarks, *pastBenchmarksPath)
createChart(pastBenchmarks.Benchmarks, *chartOutputPath)
}
// readInLatestBenchmark reads the latest benchmark result from the CSV
// file at latestBenchmarkPath and averages all of its data rows into a
// single benchmark value (one averaged duration per step, plus their
// total, dated now). Any failure is fatal.
//
// Each data row is expected to carry the six step durations in columns
// 8-13; the header line (column 0 == "name") is skipped.
func readInLatestBenchmark(latestBenchmarkPath string) benchmark {
	f, err := os.Open(latestBenchmarkPath)
	if err != nil {
		log.Fatal(err)
	}
	// close the file once parsing is done (previously leaked)
	defer f.Close()
	var cmd, api, k8s, dnsSvc, app, dnsAns float64
	steps := []*float64{&cmd, &api, &k8s, &dnsSvc, &app, &dnsAns}
	count := 0
	r := csv.NewReader(f)
	for {
		line, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		// skip the header line of the CSV file
		if line[0] == "name" {
			continue
		}
		// guard against short rows so we fail with a clear error
		// instead of panicking on the index accesses below
		if len(line) < 14 {
			log.Fatalf("row for %q has %d columns, want at least 14", line[0], len(line))
		}
		values := []float64{}
		// columns 8-13 contain the benchmark results
		for i := 8; i <= 13; i++ {
			v, err := strconv.ParseFloat(line[i], 64)
			if err != nil {
				log.Fatal(err)
			}
			values = append(values, v)
		}
		count++
		for i, step := range steps {
			*step += values[i]
		}
	}
	// averaging over zero rows would divide by zero and record NaN values
	// into the history JSON; fail loudly instead
	if count == 0 {
		log.Fatalf("no benchmark rows found in %s", latestBenchmarkPath)
	}
	var total float64
	for _, step := range steps {
		*step /= float64(count)
		total += *step
	}
	return benchmark{time.Now(), cmd, api, k8s, dnsSvc, app, dnsAns, total}
}
// readInPastBenchmarks loads the accumulated benchmark history from the
// JSON file at pastBenchmarksPath. Any read or decode failure is fatal.
func readInPastBenchmarks(pastBenchmarksPath string) *benchmarks {
	raw, err := os.ReadFile(pastBenchmarksPath)
	if err != nil {
		log.Fatal(err)
	}
	history := &benchmarks{}
	err = json.Unmarshal(raw, history)
	if err != nil {
		log.Fatal(err)
	}
	return history
}
// updateRunsFile overwrites the runs file at pastRunsPath (mode 0600)
// with the JSON-encoded benchmark history. Any encode or write failure
// is fatal.
func updateRunsFile(h *benchmarks, pastRunsPath string) {
	encoded, err := json.Marshal(h)
	if err != nil {
		log.Fatal(err)
	}
	err = os.WriteFile(pastRunsPath, encoded, 0600)
	if err != nil {
		log.Fatal(err)
	}
}
// createChart creates a time series chart of the benchmarks and saves it
// as a 12x8 inch image at chartOutputPath. One line-and-markers series is
// drawn per benchmark step, plus one for the total; X is the benchmark
// date and Y the duration in seconds. Any save failure is fatal.
func createChart(benchmarks []benchmark, chartOutputPath string) {
	n := len(benchmarks)
	var cmdXYs, apiXYs, k8sXYs, dnsSvcXYs, appXYs, dnsAnsXYs, totalXYs plotter.XYs
	xys := []*plotter.XYs{&cmdXYs, &apiXYs, &k8sXYs, &dnsSvcXYs, &appXYs, &dnsAnsXYs, &totalXYs}
	// pre-size every series to hold one point per benchmark
	for _, xy := range xys {
		*xy = make(plotter.XYs, n)
	}
	// fill in each series: X is the benchmark date (unix seconds, formatted
	// by the TimeTicks marker below), Y is that step's duration
	for i, b := range benchmarks {
		date := float64(b.Date.Unix())
		xyValues := []struct {
			xys   *plotter.XYs
			value float64
		}{
			{&cmdXYs, b.Cmd},
			{&apiXYs, b.API},
			{&k8sXYs, b.K8s},
			{&dnsSvcXYs, b.DNSSvc},
			{&appXYs, b.App},
			{&dnsAnsXYs, b.DNSAns},
			{&totalXYs, b.Total},
		}
		for _, xyValue := range xyValues {
			xy := &(*xyValue.xys)[i]
			xy.Y = xyValue.value
			xy.X = date
		}
	}
	p := plot.New()
	p.Add(plotter.NewGrid())
	p.Legend.Top = true
	p.Title.Text = "time-to-k8s"
	p.X.Label.Text = "date"
	p.X.Tick.Marker = plot.TimeTicks{Format: "2006-01-02"}
	p.Y.Label.Text = "time (seconds)"
	// max Y value of the chart
	p.Y.Max = 95
	// one entry per plotted series: its points, line/marker color, and
	// legend label
	steps := []struct {
		xys   plotter.XYs
		rgba  color.RGBA
		label string
	}{
		{cmdXYs, color.RGBA{R: 255, A: 255}, "Command Exec"},
		{apiXYs, color.RGBA{G: 255, A: 255}, "API Server Answering"},
		{k8sXYs, color.RGBA{B: 255, A: 255}, "Kubernetes SVC"},
		{dnsSvcXYs, color.RGBA{R: 255, B: 255, A: 255}, "DNS SVC"},
		{appXYs, color.RGBA{R: 255, G: 255, A: 255}, "App Running"},
		{dnsAnsXYs, color.RGBA{G: 255, B: 255, A: 255}, "DNS Answering"},
		{totalXYs, color.RGBA{B: 255, R: 140, A: 255}, "Total"},
	}
	for _, step := range steps {
		line, points := newLinePoints(step.xys, step.rgba)
		p.Add(line, points)
		p.Legend.Add(step.label, line)
	}
	if err := p.Save(12*vg.Inch, 8*vg.Inch, chartOutputPath); err != nil {
		log.Fatal(err)
	}
}
// newLinePoints builds the line-and-markers pair for one chart series,
// coloring both with lineColor and using circle glyphs for the markers.
// Any construction failure is fatal.
func newLinePoints(xys plotter.XYs, lineColor color.RGBA) (*plotter.Line, *plotter.Scatter) {
	ln, pts, err := plotter.NewLinePoints(xys)
	if err != nil {
		log.Fatal(err)
	}
	ln.Color = lineColor
	pts.Shape = draw.CircleGlyph{}
	pts.Color = lineColor
	return ln, pts
}

View File

@ -0,0 +1,54 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e

# container-runtime (docker or containerd)
RUNTIME="$1"

# fail fast with a usage message instead of running the whole benchmark
# against a config file and GCS paths that do not exist
if [[ "$RUNTIME" != "docker" && "$RUNTIME" != "containerd" ]]; then
	echo "usage: $0 <docker|containerd>" >&2
	exit 1
fi

# build minikube from the current tree and install it system-wide
install_minikube() {
	make
	sudo install ./out/minikube /usr/local/bin/minikube
}

# run the time-to-k8s benchmark (10 iterations) from the submodule
# checkout, using the runtime-specific config
run_benchmark() {
	( cd ./hack/benchmark/time-to-k8s/time-to-k8s-repo/ &&
		git submodule update --init &&
		go run . --config "../public-chart/$RUNTIME-benchmark.yaml" --iterations 10 --output ./output.csv )
}

# append the new result to the downloaded history and render the chart
generate_chart() {
	go run ./hack/benchmark/time-to-k8s/public-chart/generate-chart.go --csv ./hack/benchmark/time-to-k8s/time-to-k8s-repo/output.csv --output ./chart.png --past-runs ./runs.json
}

# remove all intermediate files
cleanup() {
	rm ./runs.json
	rm ./hack/benchmark/time-to-k8s/time-to-k8s-repo/output.csv
	rm ./chart.png
}

# pull down this runtime's historical results, run the benchmark, then
# upload the updated history (current + dated snapshot) and the chart
gsutil -m cp "gs://minikube-time-to-k8s/$RUNTIME-runs.json" ./runs.json
install_minikube
run_benchmark
generate_chart
gsutil -m cp ./runs.json "gs://minikube-time-to-k8s/$RUNTIME-runs.json"
gsutil -m cp ./runs.json "gs://minikube-time-to-k8s/$(date +'%Y-%m-%d')-$RUNTIME.json"
gsutil -m cp ./chart.png "gs://minikube-time-to-k8s/$RUNTIME-chart.png"
cleanup

@ -0,0 +1 @@
Subproject commit f6f6b2db9e718f7c9af698b6247b232a7251522f

View File

@ -0,0 +1,60 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e

# install the latest released kind binary
install_kind() {
	curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/latest/download/kind-linux-amd64
	chmod +x ./kind
	sudo mv ./kind /usr/local/bin/kind
}

# install k3d via its upstream install script
install_k3d() {
	curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash
}

# build minikube from the current tree and install it system-wide
install_minikube() {
	make
	sudo install ./out/minikube /usr/local/bin/minikube
}

# run the time-to-k8s benchmark (10 iterations) from the submodule checkout
run_benchmark() {
	( cd ./hack/benchmark/time-to-k8s/time-to-k8s-repo/ &&
		git submodule update --init &&
		go run . --config local-kubernetes.yaml --iterations 10 --output output.csv )
}

# render the benchmark CSV into a chart image named after the minikube version ($1)
generate_chart() {
	go run ./hack/benchmark/time-to-k8s/chart.go --csv ./hack/benchmark/time-to-k8s/time-to-k8s-repo/output.csv --output ./site/static/images/benchmarks/timeToK8s/"$1".png
}

# write the docs page that embeds the generated chart for this version ($1)
create_page() {
	printf -- "---\ntitle: \"%s Benchmark\"\nlinkTitle: \"%s Benchmark\"\nweight: -$(date +'%Y%m%d')\n---\n\n![time-to-k8s](/images/benchmarks/timeToK8s/%s.png)\n" "$1" "$1" "$1" > ./site/content/en/docs/benchmarks/timeToK8s/"$1".md
}

# remove the intermediate benchmark output
cleanup() {
	rm ./hack/benchmark/time-to-k8s/time-to-k8s-repo/output.csv
}

install_kind
install_k3d
install_minikube

VERSION=$(minikube version --short)
run_benchmark
generate_chart "$VERSION"
create_page "$VERSION"
cleanup

View File

@ -0,0 +1,48 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PROGRESS_MARK=/var/run/job.in.progress
REBOOT_MARK=/var/run/reboot.in.progress
timeout=900 # 15 minutes

# $PROGRESS_MARK file is touched when a new GitHub Actions job is started
# check that no job was started in the last 15 minutes
function check_running_job() {
	if [[ -f "$PROGRESS_MARK" ]]; then
		started=$(date -r "$PROGRESS_MARK" +%s)
		elapsed=$(($(date +%s) - started))
		if (( elapsed > timeout )); then
			# the job looks stuck: clear its marker and proceed with the reboot
			echo "Job started ${elapsed} seconds ago, going to restart"
			sudo rm -rf "$PROGRESS_MARK"
		else
			# a job is genuinely in progress: do not reboot under it
			echo "Job is running. exit."
			exit 1
		fi
	fi
}

check_running_job
# mark that a reboot is underway (cleared by the @reboot cron rule)
sudo touch "$REBOOT_MARK"
# avoid race if a job was started between two lines above and recheck
check_running_job

echo "cleanup docker..."
# kill all containers and prune everything; best effort, ignore failures
docker kill $(docker ps -aq) >/dev/null 2>&1 || true
docker system prune --volumes --force || true

echo "rebooting..."
sudo reboot

View File

@ -0,0 +1,32 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# refuse to run from the wrong directory: cleanup.sh must sit next to us
if [[ ! -f cleanup.sh ]]; then
	echo "cleanup.sh is missing"
	exit 1
fi

# update cron to run the cleanup script every hour
if [ ! -f /etc/cron.hourly/cleanup.sh ]; then
	sudo install cleanup.sh /etc/cron.hourly/cleanup.sh || echo "FAILED TO INSTALL CLEANUP"
fi

# install a cron rule to remove /var/run/reboot.in.progress file immediately after reboot
# (idempotent: skip if the rule is already present in the crontab)
if (crontab -l 2>/dev/null | grep '@reboot rm -rf /var/run/reboot.in.progress'); then
	echo "reboot cron rule already installed"
	exit 0
fi
(crontab -l 2>/dev/null; echo '@reboot rm -rf /var/run/reboot.in.progress') | crontab -

View File

@ -45,16 +45,11 @@ make release-iso | tee iso-logs.txt
ec=$?
if [ $ec -gt 0 ]; then
if [ "$release" = false ]; then
err=$(tail -100 iso-logs.txt)
gh pr comment ${ghprbPullId} --body "Hi ${ghprbPullAuthorLoginMention}, building a new ISO failed, with the error below:
<details>
<pre>
${err}
</pre>
</details>
Full logs are at https://storage.cloud.google.com/minikube-builds/logs/${ghprbPullId}/${ghprbActualCommit:0:7}/iso_build.txt
gh pr comment ${ghprbPullId} --body "Hi ${ghprbPullAuthorLoginMention}, building a new ISO failed.
See the logs at:
```
https://storage.cloud.google.com/minikube-builds/logs/${ghprbPullId}/iso-${BUILD_NUMBER}/iso_build.txt
```
"
fi
exit $ec

View File

@ -25,8 +25,9 @@
set -ex
gcloud cloud-shell ssh --authorize-session << EOF
OS_ARCH="linux-amd64"
VM_DRIVER="docker"
OS="linux"
ARCH="amd64"
DRIVER="docker"
JOB_NAME="Docker_Cloud_Shell"
CONTAINER_RUNTIME="docker"
EXTRA_TEST_ARGS="-test.run (TestFunctional|TestAddons)"

81
hack/jenkins/common.ps1 Normal file
View File

@ -0,0 +1,81 @@
# Copyright 2019 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
mkdir -p out
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh.exe", "C:\Go\bin\gopogh.exe")
(New-Object Net.WebClient).DownloadFile("https://github.com/gotestyourself/gotestsum/releases/download/v1.6.4/gotestsum_1.6.4_windows_amd64.tar.gz", "$env:TEMP\gotestsum.tar.gz")
tar --directory "C:\Go\bin\" -xzvf "$env:TEMP\gotestsum.tar.gz" "gotestsum.exe"
gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/minikube-windows-amd64.exe out/
gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/e2e-windows-amd64.exe out/
gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/testdata .
gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/windows_integration_setup.ps1 out/
gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/windows_integration_teardown.ps1 out/
./out/minikube-windows-amd64.exe delete --all
./out/windows_integration_setup.ps1
$started=Get-Date -UFormat %s
gotestsum --jsonfile testout.json -f standard-verbose --raw-command -- `
go tool test2json -t `
out/e2e-windows-amd64.exe --minikube-start-args="--driver=$driver" --binary=out/minikube-windows-amd64.exe --test.v --test.timeout=$timeout |
Tee-Object -FilePath testout.txt
$env:result=$lastexitcode
# If the last exit code was 0->success, x>0->error
If($env:result -eq 0){
$env:status="success"
echo "minikube: SUCCESS"
} Else {
$env:status="failure"
echo "minikube: FAIL"
}
$ended=Get-Date -UFormat %s
$elapsed=$ended-$started
$elapsed=$elapsed/60
$elapsed=[math]::Round($elapsed, 2)
$gopogh_status=gopogh --in testout.json --out_html testout.html --out_summary testout_summary.json --name "$env:JOB_NAME" -pr $env:MINIKUBE_LOCATION --repo github.com/kubernetes/minikube/ --details "${env:COMMIT}:$(Get-Date -Format "yyyy-MM-dd"):$env:ROOT_JOB_ID"
$failures=echo $gopogh_status | jq '.NumberOfFail'
$tests=echo $gopogh_status | jq '.NumberOfTests'
$bad_status="$failures / $tests failures"
$description="$status in $elapsed minute(s)."
If($env:status -eq "failure") {
$description="completed with $bad_status in $elapsed minute(s)."
}
echo $description
$env:SHORT_COMMIT=$env:COMMIT.substring(0, 7)
$gcs_bucket="minikube-builds/logs/$env:MINIKUBE_LOCATION/$env:ROOT_JOB_ID"
# Upload logs to GCS
gsutil -qm cp testout.txt gs://$gcs_bucket/${env:JOB_NAME}out.txt
gsutil -qm cp testout.json gs://$gcs_bucket/${env:JOB_NAME}.json
gsutil -qm cp testout.html gs://$gcs_bucket/${env:JOB_NAME}.html
gsutil -qm cp testout_summary.json gs://$gcs_bucket/${env:JOB_NAME}_summary.json
$env:target_url="https://storage.googleapis.com/$gcs_bucket/$env:JOB_NAME.html"
# Update the PR with the new info
$json = "{`"state`": `"$env:status`", `"description`": `"Jenkins: $description`", `"target_url`": `"$env:target_url`", `"context`": `"${env:JOB_NAME}`"}"
Invoke-WebRequest -Uri "https://api.github.com/repos/kubernetes/minikube/statuses/$env:COMMIT`?access_token=$env:access_token" -Body $json -ContentType "application/json" -Method Post -usebasicparsing
./out/windows_integration_teardown.ps1
Exit $env:result

View File

@ -18,46 +18,96 @@
# This script downloads the test files from the build bucket and makes some executable.
# The script expects the following env variables:
# OS_ARCH: The operating system and the architecture separated by a hyphen '-' (e.g. darwin-amd64, linux-amd64, windows-amd64)
# VM_DRIVER: the driver to use for the test
# OS: The operating system
# ARCH: The architecture
# DRIVER: the driver to use for the test
# CONTAINER_RUNTIME: the container runtime to use for the test
# EXTRA_START_ARGS: additional flags to pass into minikube start
# EXTRA_TEST_ARGS: additional flags to pass into go test
# JOB_NAME: the name of the logfile and check name to update on github
readonly OS_ARCH="${OS}-${ARCH}"
readonly TEST_ROOT="${HOME}/minikube-integration"
readonly TEST_HOME="${TEST_ROOT}/${OS_ARCH}-${VM_DRIVER}-${CONTAINER_RUNTIME}-${MINIKUBE_LOCATION}-$$-${COMMIT}"
readonly TEST_HOME="${TEST_ROOT}/${OS_ARCH}-${DRIVER}-${CONTAINER_RUNTIME}-${MINIKUBE_LOCATION}-$$-${COMMIT}"
export GOPATH="$HOME/go"
export KUBECONFIG="${TEST_HOME}/kubeconfig"
export PATH=$PATH:"/usr/local/bin/:/usr/local/go/bin/:$GOPATH/bin"
readonly TIMEOUT=${1:-90m}
readonly TIMEOUT=${1:-120m}
public_log_url="https://storage.googleapis.com/minikube-builds/logs/${MINIKUBE_LOCATION}/${ROOT_JOB_ID}/${JOB_NAME}.html"
# retry_github_status provides reliable GitHub commit-status updates by
# retrying the API call with exponential backoff (2s, 10s, 50s, ... for up
# to 8 attempts) to ride out API quota limits and transient network failures.
# Arguments:
#   $1 commit  - commit SHA the status is attached to
#   $2 context - check name shown on the PR
#   $3 state   - pending | success | failure | error
#   $4 token   - API token for the minikube-bot user
#   $5 target  - target_url the check links to (log/report URL)
#   $6 desc    - human-readable description shown next to the check
function retry_github_status() {
local commit=$1
local context=$2
local state=$3
local token=$4
local target=$5
local desc=$6
# Retry in case we hit our GitHub API quota or fail other ways.
local attempt=0
local timeout=2
local code=-1
echo "set GitHub status $context to $desc"
while [[ "${attempt}" -lt 8 ]]; do
# Response body is captured to a temp file so it can be printed on failure.
local out=$(mktemp)
# "999" is a sentinel code meaning curl itself failed (e.g. network error).
code=$(curl -o "${out}" -s --write-out "%{http_code}" -L -u minikube-bot:${token} \
"https://api.github.com/repos/kubernetes/minikube/statuses/${commit}" \
-H "Content-Type: application/json" \
-X POST \
-d "{\"state\": \"${state}\", \"description\": \"Jenkins: ${desc}\", \"target_url\": \"${target}\", \"context\": \"${context}\"}" || echo 999)
# 2xx HTTP codes
if [[ "${code}" =~ ^2 ]]; then
break
fi
# Dump the API response for debugging, then clean up the temp file.
cat "${out}" && rm -f "${out}"
echo "HTTP code ${code}! Retrying in ${timeout} .."
sleep "${timeout}"
attempt=$(( attempt + 1 ))
# Exponential backoff: 2, 10, 50, 250, ... seconds.
timeout=$(( timeout * 5 ))
done
}
if [ "$(uname)" = "Darwin" ]; then
if ! bash setup_docker_desktop_macos.sh; then
retry_github_status "${COMMIT}" "${JOB_NAME}" "failure" "${access_token}" "${public_log_url}" "Jenkins: docker failed to start"
exit 1
fi
fi
# We need pstree for the restart cronjobs
if [ "$(uname)" != "Darwin" ]; then
sudo apt-get -y install lsof psmisc
else
brew install pstree
brew install pstree coreutils pidof
ln -s /usr/local/bin/gtimeout /usr/local/bin/timeout || true
fi
# installing golang so we could do go get for gopogh
sudo ./installers/check_install_golang.sh "1.16" "/usr/local" || true
./installers/check_install_golang.sh "1.16.6" "/usr/local" || true
# install docker and kubectl if not present, currently skipping since it fails
#sudo ./installers/check_install_docker.sh || true
# install docker and kubectl if not present
sudo ARCH="$ARCH" ./installers/check_install_docker.sh || true
# install gotestsum if not present
GOROOT="/usr/local/go" ./installers/check_install_gotestsum.sh || true
# let's just clean all docker artifacts up
docker system prune --force --volumes || true
docker system df || true
# clean up /tmp
find /tmp -name . -o -prune -exec rm -rf -- {} + >/dev/null 2>&1 || true
echo ">> Starting at $(date)"
echo ""
echo "arch: ${OS_ARCH}"
echo "build: ${MINIKUBE_LOCATION}"
echo "driver: ${VM_DRIVER}"
echo "driver: ${DRIVER}"
echo "runtime: ${CONTAINER_RUNTIME}"
echo "job: ${JOB_NAME}"
echo "test home: ${TEST_HOME}"
@ -71,7 +121,7 @@ echo "podman: $(sudo podman version --format '{{.Version}}' || true)"
echo "go: $(go version || true)"
case "${VM_DRIVER}" in
case "${DRIVER}" in
kvm2)
echo "virsh: $(virsh --version)"
;;
@ -93,10 +143,9 @@ if ! type -P gsutil >/dev/null; then
fi
# Add the out/ directory to the PATH, for using new drivers.
PATH="$(pwd)/out/":$PATH
export PATH
export PATH="$(pwd)/out/":$PATH
echo ""
echo
echo ">> Downloading test inputs from ${MINIKUBE_LOCATION} ..."
gsutil -qm cp \
"gs://minikube-builds/${MINIKUBE_LOCATION}/minikube-${OS_ARCH}" \
@ -126,10 +175,9 @@ fi
mkdir -p "${TEST_ROOT}"
# Cleanup stale test outputs.
echo ""
echo
echo ">> Cleaning up after previous test runs ..."
for entry in $(ls ${TEST_ROOT}); do
test_path="${TEST_ROOT}/${entry}"
for test_path in ${TEST_ROOT}; do
ls -lad "${test_path}" || continue
echo "* Cleaning stale test path: ${test_path}"
@ -145,7 +193,7 @@ for entry in $(ls ${TEST_ROOT}); do
for kconfig in $(find ${test_path} -name kubeconfig -type f); do
sudo rm -f "${kconfig}"
done
## ultimate shotgun clean up docker after we tried all
docker rm -f -v $(docker ps -aq) >/dev/null 2>&1 || true
@ -157,105 +205,95 @@ for entry in $(ls ${TEST_ROOT}); do
fi
done
# sometimes tests left over zombie procs that won't exit
# for example:
# jenkins 20041 0.0 0.0 0 0 ? Z Aug19 0:00 [minikube-linux-] <defunct>
zombie_defuncts=$(ps -A -ostat,ppid | awk '/[zZ]/ && !a[$2]++ {print $2}')
if [[ "${zombie_defuncts}" != "" ]]; then
echo "Found zombie defunct procs to kill..."
ps -f -p ${zombie_defuncts} || true
kill ${zombie_defuncts} || true
fi
if type -P virsh; then
sudo virsh -c qemu:///system list --all --uuid \
| xargs -I {} sh -c "sudo virsh -c qemu:///system destroy {}; sudo virsh -c qemu:///system undefine {}" \
|| true
echo ">> virsh VM list after clean up (should be empty):"
sudo virsh -c qemu:///system list --all || true
for NET in $( sudo virsh -c qemu:///system net-list --all --name ); do
if [ "${NET}" != "default" ]; then
sudo virsh -c qemu:///system net-destroy "${NET}" || \
sudo virsh -c qemu:///system net-undefine "${NET}" || true
fi
done
echo ">> virsh VM networks list after clean up (should have only 'default'):"
sudo virsh -c qemu:///system net-list --all || true
echo ">> host networks after KVM clean up:"
sudo ip link show || true
echo
fi
if type -P vboxmanage; then
killall VBoxHeadless || true
sleep 1
killall -9 VBoxHeadless || true
for guid in $(vboxmanage list vms | grep -Eo '\{[a-zA-Z0-9-]+\}'); do
echo "- Removing stale VirtualBox VM: $guid"
vboxmanage startvm "${guid}" --type emergencystop || true
vboxmanage unregistervm "${guid}" || true
done
ifaces=$(vboxmanage list hostonlyifs | grep -E "^Name:" | awk '{ print $2 }')
for if in $ifaces; do
vboxmanage hostonlyif remove "${if}" || true
done
echo ">> VirtualBox VM list after clean up (should be empty):"
vboxmanage list vms || true
echo ">> VirtualBox interface list after clean up (should be empty):"
vboxmanage list hostonlyifs || true
fi
if type -P hdiutil; then
hdiutil info | grep -E "/dev/disk[1-9][^s]" || true
hdiutil info \
| grep -E "/dev/disk[1-9][^s]" \
| awk '{print $1}' \
| xargs -I {} sh -c "hdiutil detach {}" \
|| true
fi
# cleaning up stale hyperkits
if type -P hyperkit; then
for pid in $(pgrep hyperkit); do
echo "Killing stale hyperkit $pid"
ps -f -p $pid || true
kill $pid || true
kill -9 $pid || true
done
fi
if [[ "${VM_DRIVER}" == "hyperkit" ]]; then
if [[ -e out/docker-machine-driver-hyperkit ]]; then
sudo chown root:wheel out/docker-machine-driver-hyperkit || true
sudo chmod u+s out/docker-machine-driver-hyperkit || true
function cleanup_procs() {
# sometimes tests left over zombie procs that won't exit
# for example:
# jenkins 20041 0.0 0.0 0 0 ? Z Aug19 0:00 [minikube-linux-] <defunct>
pgrep docker > d.pids
zombie_defuncts=$(ps -A -ostat,ppid | grep -v -f d.pids | awk '/[zZ]/ && !a[$2]++ {print $2}')
if [[ "${zombie_defuncts}" != "" ]]; then
echo "Found zombie defunct procs to kill..."
ps -f -p ${zombie_defuncts} || true
kill ${zombie_defuncts} || true
fi
fi
kprocs=$(pgrep kubectl || true)
if [[ "${kprocs}" != "" ]]; then
echo "error: killing hung kubectl processes ..."
ps -f -p ${kprocs} || true
sudo -E kill ${kprocs} || true
fi
if type -P virsh; then
virsh -c qemu:///system list --all --uuid \
| xargs -I {} sh -c "virsh -c qemu:///system destroy {}; virsh -c qemu:///system undefine {}" \
|| true
echo ">> virsh VM list after clean up (should be empty):"
virsh -c qemu:///system list --all || true
fi
if type -P vboxmanage; then
killall VBoxHeadless || true
sleep 1
killall -9 VBoxHeadless || true
for guid in $(vboxmanage list vms | grep -Eo '\{[a-zA-Z0-9-]+\}'); do
echo "- Removing stale VirtualBox VM: $guid"
vboxmanage startvm "${guid}" --type emergencystop || true
vboxmanage unregistervm "${guid}" || true
done
ifaces=$(vboxmanage list hostonlyifs | grep -E "^Name:" | awk '{ print $2 }')
for if in $ifaces; do
vboxmanage hostonlyif remove "${if}" || true
done
echo ">> VirtualBox VM list after clean up (should be empty):"
vboxmanage list vms || true
echo ">> VirtualBox interface list after clean up (should be empty):"
vboxmanage list hostonlyifs || true
fi
if type -P hdiutil; then
hdiutil info | grep -E "/dev/disk[1-9][^s]" || true
hdiutil info \
| grep -E "/dev/disk[1-9][^s]" \
| awk '{print $1}' \
| xargs -I {} sh -c "hdiutil detach {}" \
|| true
fi
# cleaning up stale hyperkits
if type -P hyperkit; then
for pid in $(pgrep hyperkit); do
echo "Killing stale hyperkit $pid"
ps -f -p $pid || true
kill $pid || true
kill -9 $pid || true
done
fi
if [[ "${DRIVER}" == "hyperkit" ]]; then
if [[ -e out/docker-machine-driver-hyperkit ]]; then
sudo chown root:wheel out/docker-machine-driver-hyperkit || true
sudo chmod u+s out/docker-machine-driver-hyperkit || true
fi
fi
kprocs=$(pgrep kubectl || true)
if [[ "${kprocs}" != "" ]]; then
echo "error: killing hung kubectl processes ..."
ps -f -p ${kprocs} || true
sudo -E kill ${kprocs} || true
fi
# clean up none drivers binding on 8443
# Anything still listening on the apiserver port is a leftover from a previous
# --driver=none run; kill it (politely, then with -9) so the next test can bind.
none_procs=$(sudo lsof -i :8443 | tail -n +2 | awk '{print $2}' || true)
if [[ "${none_procs}" != "" ]]; then
echo "Found stale api servers listening on 8443 processes to kill: "
for p in $none_procs
do
# BUG FIX: message typo "Kiling" -> "Killing".
echo "Killing stale none driver: $p"
sudo -E ps -f -p $p || true
sudo -E kill $p || true
sudo -E kill -9 $p || true
done
fi
# clean up none drivers binding on 8443
# Anything still listening on the apiserver port is a leftover from a previous
# --driver=none run; kill it (politely, then with -9) so the next test can bind.
none_procs=$(sudo lsof -i :8443 | tail -n +2 | awk '{print $2}' || true)
if [[ "${none_procs}" != "" ]]; then
echo "Found stale api servers listening on 8443 processes to kill: "
for p in $none_procs
do
# BUG FIX: message typo "Kiling" -> "Killing".
echo "Killing stale none driver: $p"
sudo -E ps -f -p $p || true
sudo -E kill $p || true
sudo -E kill -9 $p || true
done
fi
}
function cleanup_stale_routes() {
local show="netstat -rn -f inet"
@ -273,6 +311,7 @@ function cleanup_stale_routes() {
done
}
cleanup_procs || true
cleanup_stale_routes || true
mkdir -p "${TEST_HOME}"
@ -324,11 +363,20 @@ then
EXTRA_START_ARGS="${EXTRA_START_ARGS} --container-runtime=${CONTAINER_RUNTIME}"
fi
${SUDO_PREFIX}${E2E_BIN} \
-minikube-start-args="--driver=${VM_DRIVER} ${EXTRA_START_ARGS}" \
-test.timeout=${TIMEOUT} -test.v \
${EXTRA_TEST_ARGS} \
-binary="${MINIKUBE_BIN}" 2>&1 | tee "${TEST_OUT}"
if test -f "${JSON_OUT}"; then
rm "${JSON_OUT}" || true # clean up previous runs of same build
fi
touch "${JSON_OUT}"
gotestsum --jsonfile "${JSON_OUT}" -f standard-verbose --raw-command -- \
go tool test2json -t \
${SUDO_PREFIX}${E2E_BIN} \
-minikube-start-args="--driver=${DRIVER} ${EXTRA_START_ARGS}" \
-test.timeout=${TIMEOUT} -test.v \
${EXTRA_TEST_ARGS} \
-binary="${MINIKUBE_BIN}" 2>&1 \
| tee "${TEST_OUT}"
result=${PIPESTATUS[0]} # capture the exit code of the first cmd in pipe.
set +x
@ -343,33 +391,16 @@ else
echo "minikube: FAIL"
fi
## calculate the time taken to finish running the e2e binary test.
# calculate the time taken to finish running the e2e binary test.
e2e_end_time="$(date -u +%s)"
elapsed=$(($e2e_end_time-$e2e_start_time))
min=$(($elapsed/60))
sec=$(tail -c 3 <<< $((${elapsed}00/60)))
elapsed=$min.$sec
SHORT_COMMIT=${COMMIT:0:7}
JOB_GCS_BUCKET="minikube-builds/logs/${MINIKUBE_LOCATION}/${SHORT_COMMIT}/${JOB_NAME}"
# Upload the raw test output, then announce the JSON-conversion step.
echo ">> Copying ${TEST_OUT} to gs://${JOB_GCS_BUCKET}out.txt"
gsutil -qm cp "${TEST_OUT}" "gs://${JOB_GCS_BUCKET}out.txt"
# BUG FIX: message typo "Attmpting" -> "Attempting".
echo ">> Attempting to convert test logs to json"
if test -f "${JSON_OUT}"; then
rm "${JSON_OUT}" || true # clean up previous runs of same build
fi
touch "${JSON_OUT}"
# Generate JSON output
echo ">> Running go test2json"
go tool test2json -t < "${TEST_OUT}" > "${JSON_OUT}" || true
if ! type "jq" > /dev/null; then
echo ">> Installing jq"
if [ "$(uname)" != "Darwin" ]; then
if [ "${OS}" != "darwin" ]; then
curl -LO https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 && sudo install jq-linux64 /usr/local/bin/jq
else
curl -LO https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 && sudo install jq-osx-amd64 /usr/local/bin/jq
@ -377,11 +408,9 @@ echo ">> Installing jq"
fi
echo ">> Installing gopogh"
if [ "$(uname)" != "Darwin" ]; then
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-linux-amd64 && sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
else
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-darwin-amd64 && sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
fi
curl -LO "https://github.com/medyagh/gopogh/releases/download/v0.9.0/gopogh-${OS_ARCH}"
sudo install "gopogh-${OS_ARCH}" /usr/local/bin/gopogh
echo ">> Running gopogh"
if test -f "${HTML_OUT}"; then
@ -390,7 +419,7 @@ fi
touch "${HTML_OUT}"
touch "${SUMMARY_OUT}"
gopogh_status=$(gopogh -in "${JSON_OUT}" -out_html "${HTML_OUT}" -out_summary "${SUMMARY_OUT}" -name "${JOB_NAME}" -pr "${MINIKUBE_LOCATION}" -repo github.com/kubernetes/minikube/ -details "${COMMIT}") || true
gopogh_status=$(gopogh -in "${JSON_OUT}" -out_html "${HTML_OUT}" -out_summary "${SUMMARY_OUT}" -name "${JOB_NAME}" -pr "${MINIKUBE_LOCATION}" -repo github.com/kubernetes/minikube/ -details "${COMMIT}:$(date +%Y-%m-%d):${ROOT_JOB_ID}") || true
fail_num=$(echo $gopogh_status | jq '.NumberOfFail')
test_num=$(echo $gopogh_status | jq '.NumberOfTests')
pessimistic_status="${fail_num} / ${test_num} failures"
@ -398,20 +427,38 @@ description="completed with ${status} in ${elapsed} minute(s)."
if [ "$status" = "failure" ]; then
description="completed with ${pessimistic_status} in ${elapsed} minute(s)."
fi
echo $description
echo "$description"
echo ">> uploading ${JSON_OUT}"
gsutil -qm cp "${JSON_OUT}" "gs://${JOB_GCS_BUCKET}.json" || true
echo ">> uploading ${HTML_OUT}"
gsutil -qm cp "${HTML_OUT}" "gs://${JOB_GCS_BUCKET}.html" || true
echo ">> uploading ${SUMMARY_OUT}"
gsutil -qm cp "${SUMMARY_OUT}" "gs://${JOB_GCS_BUCKET}_summary.json" || true
REPORT_URL_BASE="https://storage.googleapis.com"
if [ -z "${EXTERNAL}" ]; then
# If we're already in GCP, then upload results to GCS directly
SHORT_COMMIT=${COMMIT:0:7}
JOB_GCS_BUCKET="minikube-builds/logs/${MINIKUBE_LOCATION}/${ROOT_JOB_ID}/${JOB_NAME}"
echo ">> Copying ${TEST_OUT} to gs://${JOB_GCS_BUCKET}.out.txt"
echo ">> public URL: ${REPORT_URL_BASE}/${JOB_GCS_BUCKET}.out.txt"
gsutil -qm cp "${TEST_OUT}" "gs://${JOB_GCS_BUCKET}.out.txt"
public_log_url="https://storage.googleapis.com/${JOB_GCS_BUCKET}.txt"
if grep -q html "$HTML_OUT"; then
public_log_url="https://storage.googleapis.com/${JOB_GCS_BUCKET}.html"
echo ">> uploading ${JSON_OUT} to gs://${JOB_GCS_BUCKET}.json"
echo ">> public URL: ${REPORT_URL_BASE}/${JOB_GCS_BUCKET}.json"
gsutil -qm cp "${JSON_OUT}" "gs://${JOB_GCS_BUCKET}.json" || true
echo ">> uploading ${HTML_OUT} to gs://${JOB_GCS_BUCKET}.html"
echo ">> public URL: ${REPORT_URL_BASE}/${JOB_GCS_BUCKET}.html"
gsutil -qm cp "${HTML_OUT}" "gs://${JOB_GCS_BUCKET}.html" || true
echo ">> uploading ${SUMMARY_OUT} to gs://${JOB_GCS_BUCKET}_summary.json"
echo ">> public URL: ${REPORT_URL_BASE}/${JOB_GCS_BUCKET}_summary.json"
gsutil -qm cp "${SUMMARY_OUT}" "gs://${JOB_GCS_BUCKET}_summary.json" || true
else
# Otherwise, put the results in a predictable spot so the upload job can find them
REPORTS_PATH=test_reports
mkdir -p "$REPORTS_PATH"
cp "${TEST_OUT}" "$REPORTS_PATH/out.txt"
cp "${JSON_OUT}" "$REPORTS_PATH/out.json"
cp "${HTML_OUT}" "$REPORTS_PATH/out.html"
cp "${SUMMARY_OUT}" "$REPORTS_PATH/summary.txt"
fi
echo ">> Cleaning up after ourselves ..."
@ -424,48 +471,14 @@ ${SUDO_PREFIX} rm -f "${KUBECONFIG}" || true
${SUDO_PREFIX} rm -f "${TEST_OUT}" || true
${SUDO_PREFIX} rm -f "${JSON_OUT}" || true
${SUDO_PREFIX} rm -f "${HTML_OUT}" || true
rmdir "${TEST_HOME}" || true
echo ">> ${TEST_HOME} completed at $(date)"
if [[ "${MINIKUBE_LOCATION}" == "master" ]]; then
exit $result
exit "$result"
fi
# retry_github_status provides reliable GitHub commit-status updates by
# retrying with exponential backoff (2s, 10s, 50s, ... up to 8 attempts)
# to ride out API quota limits and transient network failures.
# Args: $1 commit SHA, $2 check context, $3 state (pending|success|failure|error),
#       $4 API token, $5 target_url, $6 description.
function retry_github_status() {
local commit=$1
local context=$2
local state=$3
local token=$4
local target=$5
local desc=$6
# Retry in case we hit our GitHub API quota or fail other ways.
local attempt=0
local timeout=2
local code=-1
while [[ "${attempt}" -lt 8 ]]; do
# Response body goes to a temp file so it can be printed on failure.
local out=$(mktemp)
# "999" is a sentinel code meaning curl itself failed (e.g. network error).
code=$(curl -o "${out}" -s --write-out "%{http_code}" -L -u minikube-bot:${token} \
"https://api.github.com/repos/kubernetes/minikube/statuses/${commit}" \
-H "Content-Type: application/json" \
-X POST \
-d "{\"state\": \"${state}\", \"description\": \"Jenkins: ${desc}\", \"target_url\": \"${target}\", \"context\": \"${context}\"}" || echo 999)
# 2xx HTTP codes
if [[ "${code}" =~ ^2 ]]; then
break
fi
cat "${out}" && rm -f "${out}"
echo "HTTP code ${code}! Retrying in ${timeout} .."
sleep "${timeout}"
attempt=$(( attempt + 1 ))
# Exponential backoff: 2, 10, 50, 250, ... seconds.
timeout=$(( timeout * 5 ))
done
}
retry_github_status "${COMMIT}" "${JOB_NAME}" "${status}" "${access_token}" "${public_log_url}" "${description}"
exit $result
exit "$result"

View File

@ -25,7 +25,22 @@ fi
VERSION_TO_INSTALL=${1}
INSTALL_PATH=${2}
ARCH=${ARCH:=amd64}
# current_arch maps the machine hardware name reported by `arch` to the
# GOARCH-style value the installers expect (amd64 / arm64).
# Unknown architectures fall back to amd64, with a warning on stderr.
function current_arch() {
case $(arch) in
"x86_64")
echo "amd64"
;;
"aarch64")
echo "arm64"
;;
*)
# NOTE(review): assumes amd64 is a safe default for unrecognized
# machines — confirm when adding new CI agent types.
echo "unexpected arch: $(arch). use amd64" 1>&2
echo "amd64"
;;
esac
}
ARCH=${ARCH:=$(current_arch)}
# installs or updates golang if right version doesn't exists
function check_and_install_golang() {
@ -52,20 +67,28 @@ function check_and_install_golang() {
# install_golang takes two parameters version and path to install.
function install_golang() {
echo "Installing golang version: $1 on $2"
pushd /tmp >/dev/null
local -r GO_VER="$1"
local -r GO_DIR="$2/go"
echo "Installing golang version: $GO_VER in $GO_DIR"
INSTALLOS=linux
if [[ "$OSTYPE" == "darwin"* ]]; then
INSTALLOS=darwin
fi
local -r GO_TGZ="go${GO_VER}.${INSTALLOS}-${ARCH}.tar.gz"
pushd /tmp
# using sudo because previously installed versions might have been installed by a different user.
# as it was the case on jenkins VM.
sudo curl -qL -O "https://storage.googleapis.com/golang/go${1}.${INSTALLOS}-${ARCH}.tar.gz" &&
sudo tar -xzf go${1}.${INSTALLOS}-amd64.tar.gz &&
sudo rm -rf "${2}/go" &&
sudo mv go "${2}/" && sudo chown -R $(whoami): ${2}/go
sudo rm -rf "$GO_TGZ"
curl -qL -O "https://storage.googleapis.com/golang/$GO_TGZ"
sudo rm -rf "$GO_DIR"
sudo mkdir -p "$GO_DIR"
sudo tar -C "$GO_DIR" --strip-components=1 -xzf "$GO_TGZ"
popd >/dev/null
echo "installed in $GO_DIR: $($GO_DIR/bin/go version)"
}
check_and_install_golang

View File

@ -0,0 +1,24 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Installs the pinned gotestsum release into $GOROOT/bin if it is not
# already on PATH. Expects: GOROOT set by the caller; go and sudo available.
set -eux -o pipefail

# install_gotestsum removes any stale gotestsum binary, then installs the
# pinned version via `go install`, placing it in $GOROOT/bin through GOBIN.
function install_gotestsum() {
	# Use `command -v` rather than the non-standard `which` (ShellCheck SC2230);
	# `|| true` keeps `set -e` happy when no stale binary exists.
	sudo rm -f "$(command -v gotestsum || true)"
	sudo PATH="$PATH" GOBIN="$GOROOT/bin" go install gotest.tools/gotestsum@v1.6.4
}

command -v gotestsum || install_gotestsum

View File

@ -46,13 +46,13 @@ if [[ -z $KIC_VERSION ]]; then
now=$(date +%s)
KV=$(egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f 2 | cut -d "-" -f 1)
GCR_REPO=gcr.io/k8s-minikube/kicbase-builds
DH_REPO=kicbase/build
DH_REPO=docker.io/kicbase/build
export KIC_VERSION=$KV-$now-$ghprbPullId
else
# Actual kicbase release here
release=true
GCR_REPO=${GCR_REPO:-gcr.io/k8s-minikube/kicbase}
DH_REPO=${DH_REPO:-kicbase/stable}
DH_REPO=${DH_REPO:-docker.io/kicbase/stable}
export KIC_VERSION
fi
GCR_IMG=${GCR_REPO}:${KIC_VERSION}
@ -67,16 +67,11 @@ CIBUILD=yes make push-kic-base-image | tee kic-logs.txt
ec=$?
if [ $ec -gt 0 ]; then
if [ "$release" = false ]; then
err=$(tail -100 kic-logs.txt)
gh pr comment ${ghprbPullId} --body "Hi ${ghprbPullAuthorLoginMention}, building a new kicbase image failed, with the error below:
<details>
<pre>
${err}
</pre>
</details>
Full logs are at https://storage.cloud.google.com/minikube-builds/logs/${ghprbPullId}/${ghprbActualCommit:0:7}/kic_image_build.txt
gh pr comment ${ghprbPullId} --body "Hi ${ghprbPullAuthorLoginMention}, building a new kicbase image failed.
See the logs at:
```
https://storage.cloud.google.com/minikube-builds/logs/${ghprbPullId}/kicbase-${BUILD_NUMBER}/kic_image_build.txt
```
"
fi
exit $ec

View File

@ -25,8 +25,9 @@
set -e
OS_ARCH="linux-amd64"
VM_DRIVER="docker"
OS="linux"
ARCH="amd64"
DRIVER="docker"
JOB_NAME="Docker_Linux"
CONTAINER_RUNTIME="docker"

View File

@ -32,7 +32,8 @@ docker rm -f -v "$(docker ps -aq)" >/dev/null 2>&1 || true
ARCH="arm64" \
OS="linux" \
VM_DRIVER="docker" \
DRIVER="docker" \
JOB_NAME="$JOB_NAME" \
CONTAINER_RUNTIME="docker" \
source ./run_tests.sh
EXTERNAL="yes" \
source ./common.sh

View File

@ -25,8 +25,9 @@
set -e
OS_ARCH="linux-amd64"
VM_DRIVER="docker"
OS="linux"
ARCH="amd64"
DRIVER="docker"
JOB_NAME="Docker_Linux_containerd"
CONTAINER_RUNTIME="containerd"

View File

@ -32,7 +32,8 @@ docker rm -f -v "$(docker ps -aq)" >/dev/null 2>&1 || true
ARCH="arm64" \
OS="linux" \
VM_DRIVER="docker" \
DRIVER="docker" \
JOB_NAME="$JOB_NAME" \
CONTAINER_RUNTIME="containerd" \
source ./run_tests.sh
EXTERNAL="yes" \
source ./common.sh

View File

@ -25,8 +25,9 @@
set -e
OS_ARCH="linux-amd64"
VM_DRIVER="docker"
OS="linux"
ARCH="amd64"
DRIVER="docker"
JOB_NAME="Docker_Linux_crio"
CONTAINER_RUNTIME="crio"

View File

@ -32,7 +32,8 @@ docker rm -f -v "$(docker ps -aq)" >/dev/null 2>&1 || true
ARCH="arm64" \
OS="linux" \
VM_DRIVER="docker" \
DRIVER="docker" \
JOB_NAME="$JOB_NAME" \
CONTAINER_RUNTIME="crio" \
source ./run_tests.sh
EXTERNAL="yes" \
source ./common.sh

View File

@ -25,8 +25,9 @@
set -e
OS_ARCH="linux-amd64"
VM_DRIVER="kvm2"
OS="linux"
ARCH="amd64"
DRIVER="kvm2"
JOB_NAME="KVM_Linux"
EXPECTED_DEFAULT_DRIVER="kvm2"

View File

@ -25,8 +25,9 @@
set -e
OS_ARCH="linux-amd64"
VM_DRIVER="kvm2"
OS="linux"
ARCH="amd64"
DRIVER="kvm2"
JOB_NAME="KVM_Linux_containerd"
CONTAINER_RUNTIME="containerd"

View File

@ -25,8 +25,9 @@
set -e
OS_ARCH="linux-amd64"
VM_DRIVER="kvm2"
OS="linux"
ARCH="amd64"
DRIVER="kvm2"
JOB_NAME="KVM_Linux_crio"
CONTAINER_RUNTIME="crio"

View File

@ -26,8 +26,9 @@
set -e
OS_ARCH="linux-amd64"
VM_DRIVER="none"
OS="linux"
ARCH="amd64"
DRIVER="none"
JOB_NAME="none_Linux"
EXTRA_START_ARGS="--bootstrapper=kubeadm"
EXPECTED_DEFAULT_DRIVER="kvm2"

View File

@ -25,8 +25,9 @@
set -e
OS_ARCH="linux-amd64"
VM_DRIVER="podman"
OS="linux"
ARCH="amd64"
DRIVER="podman"
JOB_NAME="Experimental_Podman_Linux"
CONTAINER_RUNTIME="containerd"

View File

@ -25,8 +25,9 @@
set -e
OS_ARCH="linux-amd64"
VM_DRIVER="virtualbox"
OS="linux"
ARCH="amd64"
DRIVER="virtualbox"
JOB_NAME="VirtualBox_Linux"
EXTRA_TEST_ARGS=""
EXPECTED_DEFAULT_DRIVER="kvm2"

View File

@ -46,6 +46,8 @@ make -j 16 \
out/minikube_${DEB_VER}_amd64.deb \
out/minikube_${DEB_VER}_arm64.deb \
out/docker-machine-driver-kvm2_$(make deb_version_base).deb \
out/docker-machine-driver-kvm2_${DEB_VER}_amd64.deb \
out/docker-machine-driver-kvm2_${DEB_VER}_arm64.deb \
&& failed=$? || failed=$?
BUILT_VERSION=$("out/minikube-$(go env GOOS)-$(go env GOARCH)" version)
@ -70,7 +72,7 @@ fi
cp -r test/integration/testdata out/
# Don't upload the buildroot artifacts if they exist
rm -r out/buildroot || true
rm -rf out/buildroot
# At this point, the out directory contains the jenkins scripts (populated by jenkins),
# testdata, and our build output. Push the changes to GCS so that worker nodes can re-use them.

View File

@ -26,17 +26,12 @@
set -eux -o pipefail
if [ "${ghprbPullId}" == "master" ]; then
echo "not setting github status for continuous builds"
exit 0
fi
jobs=(
'Hyperkit_macOS'
# 'Hyper-V_Windows'
# 'VirtualBox_Linux'
# 'VirtualBox_macOS'
'VirtualBox_Windows'
# 'VirtualBox_Windows'
# 'KVM-GPU_Linux' - Disabled
'KVM_Linux'
'KVM_Linux_containerd'
@ -54,6 +49,14 @@ jobs=(
'Docker_Cloud_Shell'
)
STARTED_LIST_REMOTE="gs://minikube-builds/logs/${ghprbPullId}/${BUILD_NUMBER}/started_environments.txt"
printf "%s\n" "${jobs[@]}" | gsutil cp - "${STARTED_LIST_REMOTE}"
if [ "${ghprbPullId}" == "master" ]; then
echo "not setting github status for continuous builds"
exit 0
fi
# retry_github_status provides reliable github status updates
function retry_github_status() {
local commit=$1
@ -88,9 +91,7 @@ function retry_github_status() {
done
}
SHORT_COMMIT=${ghprbActualCommit:0:7}
for j in ${jobs[@]}; do
retry_github_status "${ghprbActualCommit}" "${j}" "pending" "${access_token}" \
"https://storage.googleapis.com/minikube-builds/logs/${ghprbPullId}/${SHORT_COMMIT}/${j}.pending"
"https://storage.googleapis.com/minikube-builds/logs/${ghprbPullId}/${BUILD_NUMBER}/${j}.pending"
done

View File

@ -28,18 +28,11 @@ set -e
ARCH="amd64"
OS="darwin"
VM_DRIVER="docker"
DRIVER="docker"
JOB_NAME="Docker_macOS"
EXTRA_TEST_ARGS=""
EXPECTED_DEFAULT_DRIVER="docker"
# fix mac os as a service on mac os
# https://github.com/docker/for-mac/issues/882#issuecomment-506372814
#osascript -e 'quit app "Docker"'
#/Applications/Docker.app/Contents/MacOS/Docker --quit-after-install --unattended || true
#osascript -e 'quit app "Docker"'
#/Applications/Docker.app/Contents/MacOS/Docker --unattended &
EXTERNAL="yes"
begin=$(date +%s)
while [ -z "$(docker info 2> /dev/null )" ];
@ -59,4 +52,4 @@ install cron/cleanup_and_reboot_Darwin.sh $HOME/cleanup_and_reboot.sh || echo "F
echo "*/30 * * * * $HOME/cleanup_and_reboot.sh" | crontab
crontab -l
source run_tests.sh
source common.sh

View File

@ -28,15 +28,15 @@ set -ex
ARCH="amd64"
OS="darwin"
VM_DRIVER="hyperkit"
DRIVER="hyperkit"
JOB_NAME="Hyperkit_macOS"
EXTRA_TEST_ARGS=""
EXPECTED_DEFAULT_DRIVER="hyperkit"
EXTERNAL="yes"
mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
install cron/cleanup_and_reboot_Darwin.sh $HOME/cleanup_and_reboot.sh || echo "FAILED TO INSTALL CLEANUP"
echo "*/30 * * * * $HOME/cleanup_and_reboot.sh" | crontab
crontab -l
source run_tests.sh
source common.sh

View File

@ -24,9 +24,9 @@
# access_token: The Github API access token. Injected by the Jenkins credential provider.
set -e
OS_ARCH="darwin-amd64"
VM_DRIVER="virtualbox"
OS="darwin"
ARCH="amd64"
DRIVER="virtualbox"
JOB_NAME="VirtualBox_macOS"
EXTRA_START_ARGS="--bootstrapper=kubeadm"
# hyperkit behaves better, so it has higher precedence.

View File

@ -48,7 +48,10 @@ make verify-iso
env BUILD_IN_DOCKER=y \
make -j 16 \
all \
out/minikube-linux-arm64 \
out/minikube-linux-arm64.tar.gz \
out/minikube-darwin-arm64 \
out/minikube-darwin-arm64.tar.gz \
out/minikube-installer.exe \
"out/minikube_${DEB_VERSION}-${DEB_REVISION}_amd64.deb" \
"out/minikube_${DEB_VERSION}-${DEB_REVISION}_arm64.deb" \
@ -61,6 +64,7 @@ env BUILD_IN_DOCKER=y \
"out/minikube-${RPM_VERSION}-${RPM_REVISION}.ppc64le.rpm" \
"out/minikube-${RPM_VERSION}-${RPM_REVISION}.s390x.rpm" \
"out/docker-machine-driver-kvm2_${DEB_VERSION}-${DEB_REVISION}_amd64.deb" \
"out/docker-machine-driver-kvm2_${DEB_VERSION}-${DEB_REVISION}_arm64.deb" \
"out/docker-machine-driver-kvm2-${RPM_VERSION}-${RPM_REVISION}.x86_64.rpm"
# check if 'commit: <commit-id>' line contains '-dirty' commit suffix
@ -75,6 +79,7 @@ fi
# Don't upload temporary copies, avoid unused duplicate files in the release storage
rm -f out/minikube-linux-x86_64
rm -f out/minikube-linux-i686
rm -f out/minikube-linux-aarch64
rm -f out/minikube-linux-armhf
rm -f out/minikube-linux-armv7hl

View File

@ -51,7 +51,7 @@ cd "${SRC_DIR}"
brew bump-formula-pr \
--strict minikube \
--revision="${revision}" \
--message="This PR was automatically created by minikube release scripts. Contact @tstromberg with any questions." \
--message="This PR was automatically created by minikube release scripts. Contact @medyagh with any questions." \
--no-browse \
--tag="${TAG}" \
&& status=0 || status=$?

View File

@ -1,452 +0,0 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script downloads the test files from the build bucket and makes some executable.
# The script expects the following env variables:
# OS: The operating system
# ARCH: The architecture
# VM_DRIVER: the driver to use for the test
# CONTAINER_RUNTIME: the container runtime to use for the test
# EXTRA_START_ARGS: additional flags to pass into minikube start
# EXTRA_TEST_ARGS: additional flags to pass into go test
# JOB_NAME: the name of the logfile and check name to update on github
readonly OS_ARCH="${OS}-${ARCH}"
readonly TEST_ROOT="${HOME}/minikube-integration"
readonly TEST_HOME="${TEST_ROOT}/${OS_ARCH}-${VM_DRIVER}-${CONTAINER_RUNTIME}-${MINIKUBE_LOCATION}-$$-${COMMIT}"
export GOPATH="$HOME/go"
export KUBECONFIG="${TEST_HOME}/kubeconfig"
export PATH=$PATH:"/usr/local/bin/:/usr/local/go/bin/:$GOPATH/bin"
readonly TIMEOUT=${1:-120m}
# We need pstree for the restart cronjobs
if [ "$(uname)" != "Darwin" ]; then
sudo apt-get -y install lsof psmisc
else
brew install pstree coreutils pidof
ln -s /usr/local/bin/gtimeout /usr/local/bin/timeout || true
fi
# installing golang so we could do go get for gopogh
# BUG FIX: a space was missing between ARCH="$ARCH" and the script path, so
# sudo parsed the whole token as an environment assignment and then tried to
# execute "1.16" as the command; the `|| true` silently hid the failure.
sudo ARCH="$ARCH" ./installers/check_install_golang.sh "1.16" "/usr/local" || true
# install docker and kubectl if not present
sudo ARCH="$ARCH" ./installers/check_install_docker.sh
# let's just clean all docker artifacts up
docker system prune --force --volumes || true
docker system df || true
echo ">> Starting at $(date)"
echo ""
echo "arch: ${OS_ARCH}"
echo "build: ${MINIKUBE_LOCATION}"
echo "driver: ${VM_DRIVER}"
echo "runtime: ${CONTAINER_RUNTIME}"
echo "job: ${JOB_NAME}"
echo "test home: ${TEST_HOME}"
echo "sudo: ${SUDO_PREFIX}"
echo "kernel: $(uname -v)"
echo "uptime: $(uptime)"
# Setting KUBECONFIG prevents the version check from erroring out due to permission issues
echo "kubectl: $(env KUBECONFIG=${TEST_HOME} kubectl version --client --short=true)"
echo "docker: $(docker version --format '{{ .Client.Version }}')"
echo "podman: $(sudo podman version --format '{{.Version}}' || true)"
echo "go: $(go version || true)"
case "${VM_DRIVER}" in
kvm2)
echo "virsh: $(virsh --version)"
;;
virtualbox)
echo "vbox: $(vboxmanage --version)"
;;
esac
echo ""
mkdir -p out/ testdata/
# Install gsutil if necessary.
if ! type -P gsutil >/dev/null; then
  if [[ ! -x "out/gsutil/gsutil" ]]; then
    echo "Installing gsutil to $(pwd)/out ..."
    curl -s https://storage.googleapis.com/pub/gsutil.tar.gz | tar -C out/ -zxf -
  fi
  PATH="$(pwd)/out/gsutil:$PATH"
fi
# Add the out/ directory to the PATH, for using new drivers.
export PATH="$(pwd)/out/":$PATH
echo
echo ">> Downloading test inputs from ${MINIKUBE_LOCATION} ..."
# Fetch the minikube binary, machine-driver binaries and e2e test binary built
# for this PR/branch from GCS, plus the integration testdata tree.
gsutil -qm cp \
  "gs://minikube-builds/${MINIKUBE_LOCATION}/minikube-${OS_ARCH}" \
  "gs://minikube-builds/${MINIKUBE_LOCATION}/docker-machine-driver"-* \
  "gs://minikube-builds/${MINIKUBE_LOCATION}/e2e-${OS_ARCH}" out
gsutil -qm cp -r "gs://minikube-builds/${MINIKUBE_LOCATION}/testdata"/* testdata/
gsutil -qm cp "gs://minikube-builds/${MINIKUBE_LOCATION}/gvisor-addon" testdata/
# Set the executable bit on the e2e binary and out binary
export MINIKUBE_BIN="out/minikube-${OS_ARCH}"
export E2E_BIN="out/e2e-${OS_ARCH}"
chmod +x "${MINIKUBE_BIN}" "${E2E_BIN}" out/docker-machine-driver-*
"${MINIKUBE_BIN}" version
# Kill any minikube/e2e binaries left over from a previous run of this job:
# TERM first, then KILL; all failures are tolerated.
procs=$(pgrep "minikube-${OS_ARCH}|e2e-${OS_ARCH}" || true)
if [[ "${procs}" != "" ]]; then
  echo "Warning: found stale test processes to kill:"
  ps -f -p ${procs} || true
  kill ${procs} || true
  kill -9 ${procs} || true
fi
# Quickly notice misconfigured test roots
mkdir -p "${TEST_ROOT}"
# Cleanup stale test outputs.
echo
echo ">> Cleaning up after previous test runs ..."
# NOTE(review): ${TEST_ROOT} contains no glob characters, so this loop runs
# exactly once over the single root path — presumably it once globbed over
# per-run subdirectories; confirm before simplifying.
for test_path in ${TEST_ROOT}; do
  ls -lad "${test_path}" || continue
  echo "* Cleaning stale test path: ${test_path}"
  # Tear down any leftover "minikube tunnel" state found under the test path.
  for tunnel in $(find ${test_path} -name tunnels.json -type f); do
    env MINIKUBE_HOME="$(dirname ${tunnel})" ${MINIKUBE_BIN} tunnel --cleanup || true
  done
  # Delete all stale clusters, then remove their .minikube homes.
  for home in $(find ${test_path} -name .minikube -type d); do
    env MINIKUBE_HOME="$(dirname ${home})" ${MINIKUBE_BIN} delete --all || true
    sudo rm -Rf "${home}"
  done
  # Remove stale kubeconfig files (may be root-owned, hence sudo).
  for kconfig in $(find ${test_path} -name kubeconfig -type f); do
    sudo rm -f "${kconfig}"
  done
  ## ultimate shotgun clean up docker after we tried all
  docker rm -f -v $(docker ps -aq) >/dev/null 2>&1 || true
  # Be very specific to avoid accidentally deleting other items, like wildcards or devices
  if [[ -d "${test_path}" ]]; then
    rm -Rf "${test_path}" || true
  elif [[ -f "${test_path}" ]]; then
    rm -f "${test_path}" || true
  fi
done
# cleanup_procs best-effort kills everything a previous test run may have left
# behind: zombie children not parented by docker, stale libvirt and VirtualBox
# VMs (plus host-only interfaces), mounted disk images, hyperkit processes,
# hung kubectl processes, and "none"-driver apiservers still bound to :8443.
# It also re-applies the root/setuid bits the hyperkit driver requires.
function cleanup_procs() {
  # sometimes tests left over zombie procs that won't exit
  # for example:
  # jenkins  20041  0.0  0.0      0     0 ?        Z    Aug19   0:00 [minikube-linux-] <defunct>
  pgrep docker > d.pids
  # Zombie (Z-state) processes whose parent is not docker; awk de-dups parents.
  zombie_defuncts=$(ps -A -ostat,ppid | grep -v -f d.pids | awk '/[zZ]/ && !a[$2]++ {print $2}')
  rm -f d.pids  # FIX: remove the scratch pid list instead of leaving it in the workspace
  if [[ "${zombie_defuncts}" != "" ]]; then
    echo "Found zombie defunct procs to kill..."
    ps -f -p ${zombie_defuncts} || true
    kill ${zombie_defuncts} || true
  fi
  # Destroy and undefine every libvirt domain (kvm2 driver leftovers).
  if type -P virsh; then
    virsh -c qemu:///system list --all --uuid \
      | xargs -I {} sh -c "virsh -c qemu:///system destroy {}; virsh -c qemu:///system undefine {}" \
      || true
    echo ">> virsh VM list after clean up (should be empty):"
    virsh -c qemu:///system list --all || true
  fi
  # Stop VirtualBox VMs, unregister them, and delete host-only interfaces.
  if type -P vboxmanage; then
    killall VBoxHeadless || true
    sleep 1
    killall -9 VBoxHeadless || true
    for guid in $(vboxmanage list vms | grep -Eo '\{[a-zA-Z0-9-]+\}'); do
      echo "- Removing stale VirtualBox VM: $guid"
      vboxmanage startvm "${guid}" --type emergencystop || true
      vboxmanage unregistervm "${guid}" || true
    done
    ifaces=$(vboxmanage list hostonlyifs | grep -E "^Name:" | awk '{ print $2 }')
    for if in $ifaces; do
      vboxmanage hostonlyif remove "${if}" || true
    done
    echo ">> VirtualBox VM list after clean up (should be empty):"
    vboxmanage list vms || true
    echo ">> VirtualBox interface list after clean up (should be empty):"
    vboxmanage list hostonlyifs || true
  fi
  # Detach stale disk images (macOS); [1-9] excludes the system disk /dev/disk0
  # and [^s] skips slice entries such as disk2s1.
  if type -P hdiutil; then
    hdiutil info | grep -E "/dev/disk[1-9][^s]" || true
    hdiutil info \
      | grep -E "/dev/disk[1-9][^s]" \
      | awk '{print $1}' \
      | xargs -I {} sh -c "hdiutil detach {}" \
      || true
  fi
  # cleaning up stale hyperkits
  if type -P hyperkit; then
    for pid in $(pgrep hyperkit); do
      echo "Killing stale hyperkit $pid"
      ps -f -p $pid || true
      kill $pid || true
      kill -9 $pid || true
    done
  fi
  # The hyperkit driver binary must be root-owned and setuid to manage VMs.
  if [[ "${VM_DRIVER}" == "hyperkit" ]]; then
    if [[ -e out/docker-machine-driver-hyperkit ]]; then
      sudo chown root:wheel out/docker-machine-driver-hyperkit || true
      sudo chmod u+s out/docker-machine-driver-hyperkit || true
    fi
  fi
  kprocs=$(pgrep kubectl || true)
  if [[ "${kprocs}" != "" ]]; then
    echo "error: killing hung kubectl processes ..."
    ps -f -p ${kprocs} || true
    sudo -E kill ${kprocs} || true
  fi
  # clean up none drivers binding on 8443
  none_procs=$(sudo lsof -i :8443 | tail -n +2 | awk '{print $2}' || true)
  if [[ "${none_procs}" != "" ]]; then
    echo "Found stale api servers listening on 8443 processes to kill: "
    for p in $none_procs
    do
      # FIX: corrected "Kiling" typo in the log message.
      echo "Killing stale none driver: $p"
      sudo -E ps -f -p $p || true
      sudo -E kill $p || true
      sudo -E kill -9 $p || true
    done
  fi
}
# cleanup_stale_routes removes any leftover "minikube tunnel" routes to the
# Kubernetes service CIDR (10.96.0.0/...), which would otherwise confuse the
# next run. Works on both macOS (netstat/route) and Linux (ip route).
function cleanup_stale_routes() {
  local show="netstat -rn -f inet"
  local del="sudo route -n delete"
  if [[ "$(uname)" == "Linux" ]]; then
    show="ip route show"
    del="sudo ip route delete"
  fi
  # FIX: escape the dots and anchor the match; the previous unanchored pattern
  # (where "." matches any character) could select unrelated routes for
  # deletion.
  local troutes=$($show | awk '{ print $1 }' | grep '^10\.96\.0\.0' || true)
  for route in ${troutes}; do
    echo "WARNING: deleting stale tunnel route: ${route}"
    $del "${route}" || true
  done
}
cleanup_procs || true
cleanup_stale_routes || true

mkdir -p "${TEST_HOME}"
export MINIKUBE_HOME="${TEST_HOME}/.minikube"

# Build the gvisor image so that we can integration test changes to pkg/gvisor
chmod +x ./testdata/gvisor-addon
# skipping gvisor mac because of https://github.com/kubernetes/minikube/issues/5137
if [ "$(uname)" != "Darwin" ]; then
  # Should match GVISOR_IMAGE_VERSION in Makefile
  docker build -t gcr.io/k8s-minikube/gvisor-addon:2 -f testdata/gvisor-addon-Dockerfile ./testdata
fi

# Warn (and briefly back off) when the host is already heavily loaded; high
# load is a common cause of flaky integration-test failures.
readonly LOAD=$(uptime | egrep -o "load average.*: [0-9]+" | cut -d" " -f3)
if [[ "${LOAD}" -gt 2 ]]; then
  echo ""
  echo "********************** LOAD WARNING ********************************"
  echo "Load average is very high (${LOAD}), which may cause failures. Top:"
  if [[ "$(uname)" == "Darwin" ]]; then
    # Two samples, macOS does not calculate CPU usage on the first one
    top -l 2 -o cpu -n 5 | tail -n 15
  else
    top -b -n1 | head -n 15
  fi
  echo "********************** LOAD WARNING ********************************"
  echo "Sleeping 30s to see if load goes down ...."
  sleep 30
  uptime
fi
# Per-run output artifacts: raw log, test2json output, gopogh HTML report and
# machine-readable summary.
readonly TEST_OUT="${TEST_HOME}/testout.txt"
readonly JSON_OUT="${TEST_HOME}/test.json"
readonly HTML_OUT="${TEST_HOME}/test.html"
readonly SUMMARY_OUT="${TEST_HOME}/test_summary.json"

e2e_start_time="$(date -u +%s)"
echo ""
echo ">> Starting ${E2E_BIN} at $(date)"
set -x

if test -f "${TEST_OUT}"; then
  rm "${TEST_OUT}" || true # clean up previous runs of same build
fi
touch "${TEST_OUT}"

# Only pass --container-runtime when one was requested for this job.
if [ ! -z "${CONTAINER_RUNTIME}" ]
then
  EXTRA_START_ARGS="${EXTRA_START_ARGS} --container-runtime=${CONTAINER_RUNTIME}"
fi

# Run the e2e binary, teeing its output to TEST_OUT; the e2e exit code (not
# tee's) is what matters, hence the PIPESTATUS capture below.
${SUDO_PREFIX}${E2E_BIN} \
  -minikube-start-args="--driver=${VM_DRIVER} ${EXTRA_START_ARGS}" \
  -test.timeout=${TIMEOUT} -test.v \
  ${EXTRA_TEST_ARGS} \
  -binary="${MINIKUBE_BIN}" 2>&1 | tee "${TEST_OUT}"

result=${PIPESTATUS[0]} # capture the exit code of the first cmd in pipe.
set +x
echo ">> ${E2E_BIN} exited with ${result} at $(date)"
echo ""
# Translate the e2e exit code into a human-readable status string.
if [[ $result -eq 0 ]]; then
  status="success"
  echo "minikube: SUCCESS"
else
  status="failure"
  echo "minikube: FAIL"
fi

# Calculate the time the e2e binary took, formatted as minutes with a
# two-digit fraction (e.g. 90s -> "1.50").
e2e_end_time="$(date -u +%s)"
elapsed=$(($e2e_end_time-$e2e_start_time))
min=$(($elapsed/60))
# FIX: zero-pad the fractional part; the previous "tail -c 3" trick produced
# e.g. "0.5" instead of "0.05" when the fraction was a single digit.
sec=$(printf "%02d" $((elapsed % 60 * 100 / 60)))
elapsed=$min.$sec
# FIX: corrected "Attmpting" typo in the log message.
echo ">> Attempting to convert test logs to json"
if test -f "${JSON_OUT}"; then
  rm "${JSON_OUT}" || true # clean up previous runs of same build
fi
touch "${JSON_OUT}"

# Generate JSON output
echo ">> Running go test2json"
go tool test2json -t < "${TEST_OUT}" > "${JSON_OUT}" || true

# jq is needed below to pull the failure counts out of the gopogh summary.
if ! type "jq" > /dev/null; then
  echo ">> Installing jq"
  if [ "$(uname)" != "Darwin" ]; then
    curl -LO https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 && sudo install jq-linux64 /usr/local/bin/jq
  else
    curl -LO https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 && sudo install jq-osx-amd64 /usr/local/bin/jq
  fi
fi

echo ">> Installing gopogh"
curl -LO "https://github.com/medyagh/gopogh/releases/download/v0.6.0/gopogh-${OS_ARCH}"
sudo install "gopogh-${OS_ARCH}" /usr/local/bin/gopogh

echo ">> Running gopogh"
if test -f "${HTML_OUT}"; then
  rm "${HTML_OUT}" || true # clean up previous runs of same build
fi
touch "${HTML_OUT}"
touch "${SUMMARY_OUT}"
# gopogh renders the test2json output into an HTML report and a JSON summary;
# a failure of gopogh itself is tolerated (gopogh_status stays empty).
gopogh_status=$(gopogh -in "${JSON_OUT}" -out_html "${HTML_OUT}" -out_summary "${SUMMARY_OUT}" -name "${JOB_NAME}" -pr "${MINIKUBE_LOCATION}" -repo github.com/kubernetes/minikube/ -details "${COMMIT}") || true
fail_num=$(echo $gopogh_status | jq '.NumberOfFail')
test_num=$(echo $gopogh_status | jq '.NumberOfTests')
pessimistic_status="${fail_num} / ${test_num} failures"
# Failed runs report the failure count instead of the bare status word.
description="completed with ${status} in ${elapsed} minute(s)."
if [ "$status" = "failure" ]; then
  description="completed with ${pessimistic_status} in ${elapsed} minute(s)."
fi
echo "$description"
# Copy the artifacts into the workspace-relative directory that Jenkins
# archives.
REPORTS_PATH=test_reports
mkdir -p "$REPORTS_PATH"
cp "${TEST_OUT}" "$REPORTS_PATH/out.txt"
cp "${JSON_OUT}" "$REPORTS_PATH/out.json"
cp "${HTML_OUT}" "$REPORTS_PATH/out.html"
# NOTE(review): the summary is JSON but archived with a .txt extension —
# presumably deliberate for easy in-browser viewing; confirm before renaming.
cp "${SUMMARY_OUT}" "$REPORTS_PATH/summary.txt"

echo ">> Cleaning up after ourselves ..."
timeout 3m ${SUDO_PREFIX}${MINIKUBE_BIN} tunnel --cleanup || true
timeout 5m ${SUDO_PREFIX}${MINIKUBE_BIN} delete --all --purge >/dev/null 2>/dev/null || true
cleanup_stale_routes || true

${SUDO_PREFIX} rm -Rf "${MINIKUBE_HOME}" || true
${SUDO_PREFIX} rm -f "${KUBECONFIG}" || true
rmdir "${TEST_HOME}" || true
echo ">> ${TEST_HOME} completed at $(date)"

# Master builds have no PR to report a commit status to; exit immediately
# with the e2e result.
if [[ "${MINIKUBE_LOCATION}" == "master" ]]; then
  exit "$result"
fi
# Public URL of the uploaded gopogh HTML report, used as the status target.
public_log_url="https://storage.googleapis.com/minikube-builds/logs/${MINIKUBE_LOCATION}/${COMMIT:0:7}/${JOB_NAME}.html"
# retry_github_status provides reliable GitHub commit-status updates by
# retrying with exponential backoff (8 attempts: 2s, 10s, 50s, ...), riding
# out transient API errors and rate limiting.
#   $1 commit SHA, $2 status context, $3 state, $4 API token,
#   $5 target URL, $6 human-readable description
function retry_github_status() {
  local commit=$1
  local context=$2
  local state=$3
  local token=$4
  local target=$5
  local desc=$6

  # Retry in case we hit our GitHub API quota or fail other ways.
  local attempt=0
  local timeout=2
  local code=-1

  echo "set GitHub status $context to $desc"
  while [[ "${attempt}" -lt 8 ]]; do
    # Declare and assign separately so mktemp's status isn't masked (SC2155).
    local out
    out=$(mktemp)
    # "|| echo 999" yields a sentinel code if curl itself fails to run.
    # FIX: quote the credential argument so an unusual token cannot be split.
    code=$(curl -o "${out}" -s --write-out "%{http_code}" -L -u "minikube-bot:${token}" \
      "https://api.github.com/repos/kubernetes/minikube/statuses/${commit}" \
      -H "Content-Type: application/json" \
      -X POST \
      -d "{\"state\": \"${state}\", \"description\": \"Jenkins: ${desc}\", \"target_url\": \"${target}\", \"context\": \"${context}\"}" || echo 999)
    # 2xx HTTP codes
    if [[ "${code}" =~ ^2 ]]; then
      rm -f "${out}"  # FIX: the temp file was previously leaked on success
      break
    fi
    cat "${out}" && rm -f "${out}"
    echo "HTTP code ${code}! Retrying in ${timeout} .."
    sleep "${timeout}"
    attempt=$(( attempt + 1 ))
    timeout=$(( timeout * 5 ))
  done
}
# Report the final job status back to the PR, then propagate the e2e exit code.
retry_github_status "${COMMIT}" "${JOB_NAME}" "${status}" "${access_token}" "${public_log_url}" "${description}"
exit "$result"

View File

@ -0,0 +1,43 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -x

# Nothing to do when the Docker daemon already answers.
if docker system info > /dev/null 2>&1; then
  echo "Docker is already running, exiting"
  exit 0
fi

# Quit any half-dead Docker Desktop instance before relaunching it.
osascript -e 'quit app "Docker"'

# Give the daemon up to two minutes to come back up.
max_wait=120
waited=0
echo "Starting Docker Desktop..."
open --background -a Docker
echo "Waiting at most two minutes..."
until docker system info > /dev/null 2>&1; do
  sleep 1
  waited=$((waited + 1))
  if [ "$waited" -gt "$max_wait" ]; then
    echo "Start Docker Desktop failed"
    exit 1
  fi
done
echo "Docker Desktop started!"

Some files were not shown because too many files have changed in this diff Show More