Merge branch 'master' into pipe-regex

pull/10510/head
Thomas Strömberg 2021-02-19 09:14:01 -08:00 committed by GitHub
commit 4ff577d49f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
277 changed files with 5948 additions and 1643 deletions

.github/workflows/build.yml

@ -0,0 +1,78 @@
name: build
on:
push:
branches:
- master
paths:
- "**.go"
- "Makefile"
- "!deploy/kicbase/**"
- "!deploy/iso/**"
env:
GOPROXY: https://proxy.golang.org
jobs:
build_minikube:
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
go-version: '1.15.5'
stable: true
- name: Download Dependencies
run: go mod download
- name: Build Binaries
run: |
make cross
make e2e-cross
cp -r test/integration/testdata ./out
whoami
echo github ref $GITHUB_REF
echo workflow $GITHUB_WORKFLOW
echo home $HOME
echo event name $GITHUB_EVENT_NAME
echo workspace $GITHUB_WORKSPACE
echo "end of debug stuff"
echo $(which jq)
- uses: actions/upload-artifact@v1
with:
name: minikube_binaries
path: out
lint:
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
go-version: '1.15.5'
stable: true
- name: Install libvirt
run: |
sudo apt-get update
sudo apt-get install -y libvirt-dev
- name: Download Dependencies
run: go mod download
- name: Lint
env:
TESTSUITE: lintall
run: make test
continue-on-error: false
unit_test:
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
go-version: '1.15.5'
stable: true
- name: Install libvirt
run: |
sudo apt-get update
sudo apt-get install -y libvirt-dev
- name: Download Dependencies
run: go mod download
- name: Unit Test
env:
TESTSUITE: unittest
run: make test
continue-on-error: false
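
The build_minikube job above cross-compiles the minikube and e2e test binaries and publishes them as the minikube_binaries artifact that later jobs download. A rough local equivalent of its build steps, assuming a checkout of the repository and Go 1.15.x on the PATH (a sketch, not part of the workflow):

    go mod download
    make cross                              # minikube binaries for linux, darwin and windows
    make e2e-cross                          # matching end-to-end test binaries
    cp -r test/integration/testdata ./out   # test fixtures shipped alongside the binaries
    ls out                                  # this directory is uploaded as "minikube_binaries"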


@ -7,7 +7,7 @@ env:
GOPROXY: https://proxy.golang.org
jobs:
build_test_iso:
runs-on: [self-hosted, debian9, gcp]
runs-on: [self-hosted, debian9, baremetal, equinix]
steps:
- name: Clean up workspace
shell: bash
@ -82,7 +82,7 @@ jobs:
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
sudo apt-get install -y jq
- name: Run Integration Test
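
This workflow's diff moves the ISO build job to the equinix bare-metal runners and bumps gopogh from v0.3.0 to v0.4.0; the same gopogh bump recurs across the workflow files below. The install pattern, with the version pulled into a variable for illustration (the workflows themselves hard-code the URL):

    GOPOGH_VERSION=v0.4.0
    curl -LO "https://github.com/medyagh/gopogh/releases/download/${GOPOGH_VERSION}/gopogh-linux-amd64"
    sudo install gopogh-linux-amd64 /usr/local/bin/gopogh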


@ -1,121 +0,0 @@
name: KIC_IMAGE
on:
pull_request:
paths:
- "deploy/kicbase/**"
env:
GOPROXY: https://proxy.golang.org
jobs:
build_test_kic_image:
runs-on: [self-hosted, debian9, gcp]
steps:
- name: Clean up
shell: bash
run: |
pwd
ls -lah
rm -rf out
ls -lah
df -h
sudo rm -f /etc/cron.hourly/cleanup_and_reboot || true
- uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
go-version: '1.15.5'
stable: true
- name: Download Dependencies
run: go mod download
- name: Build Binaries
run: |
sudo apt-get update
sudo apt-get install -y make build-essential
make linux
make e2e-linux-amd64
cp -r test/integration/testdata ./out
whoami
echo github ref $GITHUB_REF
echo workflow $GITHUB_WORKFLOW
echo home $HOME
echo event name $GITHUB_EVENT_NAME
echo workspace $GITHUB_WORKSPACE
echo "end of debug stuff"
echo $(which jq)
- name: Build Image
run: |
docker images
make kic-base-image
docker images
- name: Info
shell: bash
run: |
hostname
uname -r
lsb_release -a
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
sudo apt-get install -y jq
rm -f gopogh-linux-amd64 || true
- name: Run Integration Test
continue-on-error: false
# bash {0} to allow test to continue to next step in case of failure
shell: bash {0}
run: |
KIC_VERSION=$(egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f2)
KIC_IMG_HEAD="local/kicbase:${KIC_VERSION}-snapshot"
cd out
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args="--vm-driver=docker --base-image=${KIC_IMG_HEAD}" -test.v -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
- name: Generate HTML Report
shell: bash
run: |
cd out
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(/usr/local/bin/gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail' || true)
TestsNum=$(echo $STAT | jq '.NumberOfTests' || true)
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
echo 'STAT<<EOF' >> $GITHUB_ENV
echo "${STAT}" >> $GITHUB_ENV
echo 'EOF' >> $GITHUB_ENV
- uses: actions/upload-artifact@v1
with:
name: kic_image_functional_test_docker_ubuntu
path: out/report
- name: The End Result build_test_kic_image_docker_ubuntu
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*******************${numPass} Passes :) *******************"
echo $STAT | jq '.PassedTests' || true
echo "*******************************************************"
echo "---------------- ${numFail} Failures :( ----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
if [ "$numPass" -eq 0 ];then echo "*** 0 Passed! ***";exit 2;fi
if [ "$numPass" -eq 0 ];then echo "*** Passed! ***";exit 0;fi


@ -120,7 +120,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -205,7 +205,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Install docker
shell: bash
@ -350,7 +350,7 @@ jobs:
continue-on-error: true
shell: powershell
run: |
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
choco install -y kubernetes-cli
choco install -y jq
choco install -y caffeine
@ -487,7 +487,7 @@ jobs:
shell: powershell
run: |
$ErrorActionPreference = "SilentlyContinue"
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
choco install -y kubernetes-cli
choco install -y jq
choco install -y caffeine
@ -592,7 +592,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -651,6 +651,313 @@ jobs:
if [ "$numPass" -eq 0 ];then echo "*** 0 Passed! ***";exit 2;fi
if [ "$numPass" -lt 26 ];then echo "*** Failed to pass at least 26 ! ***";exit 2;fi
if [ "$numPass" -eq 0 ];then echo "*** Passed! ***";exit 0;fi
scheduled_stop_docker_windows:
needs: [build_minikube]
env:
TIME_ELAPSED: time
JOB_NAME: "scheduled_stop_docker_windows"
GOPOGH_RESULT: ""
STAT: ""
runs-on: [self-hosted, windows-10-ent, 8CPUs]
steps:
- name: Clean up
continue-on-error: true
shell: powershell
run: |
echo $env:computerName
ls
$ErrorActionPreference = "SilentlyContinue"
cd minikube_binaries
ls
$env:KUBECONFIG="${pwd}\testhome\kubeconfig"
$env:MINIKUBE_HOME="${pwd}\testhome"
.\minikube-windows-amd64.exe delete --all --purge
Get-VM | Where-Object {$_.Name -ne "DockerDesktopVM"} | Foreach {
.\minikube-windows-amd64.exe delete -p $_.Name
Suspend-VM $_.Name
Stop-VM $_.Name -Force
Remove-VM $_.Name -Force
}
cd ..
Remove-Item minikube_binaries -Force -Recurse
ls
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Start Docker Desktop
shell: powershell
continue-on-error: true
run: |
$ErrorActionPreference = "SilentlyContinue"
docker ps 2>&1 | Out-Null
$docker_running = $?
if (!$docker_running) {
Write-Output "Starting Docker as an administrator"
Start-Process 'C:/Program Files/Docker/Docker/Docker Desktop.exe' -Verb runAs
}
while (!$docker_running) {
Start-Sleep 5
docker ps 2>&1 | Out-Null
$docker_running = $?
}
Write-Output "Docker is running"
docker system prune -f
- name: Info
shell: powershell
run: |
echo $env:computername
echo "------------------------"
docker info
echo "------------------------"
docker volume ls
echo "------------------------"
docker system info --format '{{json .}}'
echo "------------------------"
- uses: actions/setup-go@v2
with:
go-version: '1.15.5'
stable: true
- name: Install tools
continue-on-error: true
shell: powershell
run: |
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
choco install -y kubernetes-cli
choco install -y jq
choco install -y caffeine
if (Test-Path 'C:\Program Files\Docker\Docker\resources\bin\kubectl.exe') { Remove-Item 'C:\Program Files\Docker\Docker\resources\bin\kubectl.exe' };
- name: Run Integration Test in powershell
shell: powershell
run: |
cd minikube_binaries
New-Item -Force -Path "report" -ItemType Directory
New-Item -Force -Path "testhome" -ItemType Directory
$START_TIME=(GET-DATE)
$env:KUBECONFIG="${pwd}\testhome\kubeconfig"
$env:MINIKUBE_HOME="${pwd}\testhome"
$ErrorActionPreference = "SilentlyContinue"
.\e2e-windows-amd64.exe --minikube-start-args="--driver=docker" --test.timeout=15m --timeout-multiplier=1 --test.v --test.run=TestScheduledStopWindows --binary=./minikube-windows-amd64.exe | Tee-Object -FilePath ".\report\testout.txt"
$END_TIME=(GET-DATE)
echo $END_TIME
$DURATION=(NEW-TIMESPAN -Start $START_TIME -End $END_TIME)
echo $DURATION
$SECS=($DURATION.TotalSeconds)
$MINS=($DURATION.TotalMinutes)
$T_ELAPSED="$MINS m $SECS s"
echo "----"
echo $T_ELAPSED
echo "----"
echo "TIME_ELAPSED=$T_ELAPSED" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
scheduled_stop_hyperv_windows:
needs: [build_minikube]
env:
TIME_ELAPSED: time
JOB_NAME: "scheduled_stop_hyperv_windows"
GOPOGH_RESULT: ""
runs-on: [self-hosted, windows-10-ent, Standard_D16s_v3, hyperv]
steps:
- name: Clean up
continue-on-error: true
shell: powershell
run: |
echo $env:computerName
ls
$ErrorActionPreference = "SilentlyContinue"
cd minikube_binaries
ls
$env:KUBECONFIG="${pwd}\testhome\kubeconfig"
$env:MINIKUBE_HOME="${pwd}\testhome"
.\minikube-windows-amd64.exe delete --all --purge
Get-VM | Where-Object {$_.Name -ne "DockerDesktopVM"} | Foreach {
Stop-VM -Name $_.Name -Force
Remove-VM $_.Name -Force
}
cd ..
Remove-Item minikube_binaries -Force -Recurse
ls
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Start Docker Desktop
shell: powershell
continue-on-error: true
run: |
$ErrorActionPreference = "SilentlyContinue"
docker ps 2>&1 | Out-Null
$docker_running = $?
if (!$docker_running) {
Write-Output "Starting Docker as an administrator"
Start-Process 'C:/Program Files/Docker/Docker/Docker Desktop.exe' -Verb runAs
}
while (!$docker_running) {
Start-Sleep 5
docker ps 2>&1 | Out-Null
$docker_running = $?
}
Write-Output "Docker is running"
docker system prune -f
- name: Info
continue-on-error: true
shell: powershell
run: |
$ErrorActionPreference = "SilentlyContinue"
cd minikube_binaries
ls
echo $env:computername
Get-WmiObject -class Win32_ComputerSystem
- uses: actions/setup-go@v2
with:
go-version: '1.15.5'
stable: true
- name: Install tools
continue-on-error: true
shell: powershell
run: |
$ErrorActionPreference = "SilentlyContinue"
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
choco install -y kubernetes-cli
choco install -y jq
choco install -y caffeine
if (Test-Path 'C:\Program Files\Docker\Docker\resources\bin\kubectl.exe') { Remove-Item 'C:\Program Files\Docker\Docker\resources\bin\kubectl.exe' };
- name: Run Integration Test in powershell
shell: powershell
run: |
cd minikube_binaries
New-Item -Force -Path "report" -ItemType Directory
New-Item -Force -Path "testhome" -ItemType Directory
$START_TIME=(GET-DATE)
$env:KUBECONFIG="${pwd}\testhome\kubeconfig"
$env:MINIKUBE_HOME="${pwd}\testhome"
$ErrorActionPreference = "SilentlyContinue"
.\e2e-windows-amd64.exe --minikube-start-args="--driver=hyperv" --test.timeout=20m --timeout-multiplier=1.5 --test.v --test.run=TestScheduledStopWindows --binary=./minikube-windows-amd64.exe | Tee-Object -FilePath ".\report\testout.txt"
$END_TIME=(GET-DATE)
echo $END_TIME
$DURATION=(NEW-TIMESPAN -Start $START_TIME -End $END_TIME)
echo $DURATION
$SECS=($DURATION.TotalSeconds)
$MINS=($DURATION.TotalMinutes)
$T_ELAPSED="$MINS m $SECS s"
echo "----"
echo $T_ELAPSED
echo "----"
echo "TIME_ELAPSED=$T_ELAPSED" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
functional_docker_ubuntu_arm64:
needs: [ build_minikube ]
runs-on: [ self-hosted, arm64 ]
env:
TIME_ELAPSED: time
JOB_NAME: "functional_docker_ubuntu_arm64"
GOPOGH_RESULT: ""
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/arm64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-arm64
sudo install gopogh-linux-arm64 /usr/local/bin/gopogh
- name: Install tools
shell: bash
run: |
sudo apt update
sudo apt install -y jq docker
- name: Docker Info
shell: bash
run: |
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info --format='{{json .}}'|| true
echo "--------------------------"
docker ps || true
echo "--------------------------"
whoami || true
echo "--------------------------"
hostname || true
echo "--------------------------"
# go 1.14.6+ is needed because of this bug https://github.com/golang/go/issues/39308
- uses: actions/setup-go@v2
with:
go-version: '1.15.2'
stable: true
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: false
# bash {0} to allow test to continue to next step in case of failure
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-arm64 -minikube-start-args=--vm-driver=docker -test.run TestFunctional -test.timeout=10m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-arm64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
echo 'STAT<<EOF' >> $GITHUB_ENV
echo "${STAT}" >> $GITHUB_ENV
echo 'EOF' >> $GITHUB_ENV
- uses: actions/upload-artifact@v1
with:
name: functional_docker_ubuntu_arm64
path: minikube_binaries/report
- name: The End Result - functional_docker_ubuntu_arm64
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*******************${numPass} Passes :) *******************"
echo $STAT | jq '.PassedTests' || true
echo "*******************************************************"
echo "---------------- ${numFail} Failures :( ----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
if [ "$numPass" -eq 0 ];then echo "*** 0 Passed! ***";exit 2;fi
if [ "$numPass" -lt 20 ];then echo "*** Failed to pass at least 20 ! ***";exit 2;fi
if [ "$numPass" -eq 0 ];then echo "*** Passed! ***";exit 0;fi
addons_certs_docker_ubuntu:
runs-on: ubuntu-18.04
env:
@ -689,7 +996,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -771,7 +1078,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Install docker
shell: bash
@ -883,7 +1190,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -967,7 +1274,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -1074,7 +1381,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -1156,7 +1463,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -1235,6 +1542,7 @@ jobs:
functional_docker_windows,
functional_hyperv_windows,
functional_baremetal_ubuntu18_04,
functional_docker_ubuntu_arm64,
addons_certs_docker_ubuntu,
addons_certs_virtualbox_macos,
multinode_docker_ubuntu,
@ -1257,12 +1565,14 @@ jobs:
cp -r ./functional_docker_windows ./all_reports/
cp -r ./functional_hyperv_windows ./all_reports/
cp -r ./functional_baremetal_ubuntu18_04 ./all_reports/
cp -r ./functional_docker_ubuntu_arm64 ./all_reports/
cp -r ./addons_certs_docker_ubuntu ./all_reports/
cp -r ./addons_certs_virtualbox_macos ./all_reports/
cp -r ./multinode_docker_ubuntu ./all_reports/
cp -r ./multinode_virtualbox_macos ./all_reports/
cp -r ./preload_dockerflags_docker_ubuntu ./all_reports/
cp -r ./pause_preload_dockerflags_virtualbox_macos ./all_reports/
- uses: actions/upload-artifact@v1
with:
name: all_reports


@ -118,7 +118,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -203,7 +203,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Install docker
shell: bash
@ -348,7 +348,7 @@ jobs:
continue-on-error: true
shell: powershell
run: |
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
choco install -y kubernetes-cli
choco install -y jq
choco install -y caffeine
@ -485,7 +485,7 @@ jobs:
shell: powershell
run: |
$ErrorActionPreference = "SilentlyContinue"
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
choco install -y kubernetes-cli
choco install -y jq
choco install -y caffeine
@ -555,6 +555,198 @@ jobs:
If ($numPass -eq 0){ exit 2 }
If ($numPass -lt 33){ exit 2 }
If ($numFail -eq 0){ exit 0 }
scheduled_stop_docker_windows:
needs: [build_minikube]
env:
TIME_ELAPSED: time
JOB_NAME: "scheduled_stop_docker_windows"
GOPOGH_RESULT: ""
STAT: ""
runs-on: [self-hosted, windows-10-ent, 8CPUs]
steps:
- name: Clean up
continue-on-error: true
shell: powershell
run: |
echo $env:computerName
ls
$ErrorActionPreference = "SilentlyContinue"
cd minikube_binaries
ls
$env:KUBECONFIG="${pwd}\testhome\kubeconfig"
$env:MINIKUBE_HOME="${pwd}\testhome"
.\minikube-windows-amd64.exe delete --all --purge
Get-VM | Where-Object {$_.Name -ne "DockerDesktopVM"} | Foreach {
.\minikube-windows-amd64.exe delete -p $_.Name
Suspend-VM $_.Name
Stop-VM $_.Name -Force
Remove-VM $_.Name -Force
}
cd ..
Remove-Item minikube_binaries -Force -Recurse
ls
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Start Docker Desktop
shell: powershell
continue-on-error: true
run: |
$ErrorActionPreference = "SilentlyContinue"
docker ps 2>&1 | Out-Null
$docker_running = $?
if (!$docker_running) {
Write-Output "Starting Docker as an administrator"
Start-Process 'C:/Program Files/Docker/Docker/Docker Desktop.exe' -Verb runAs
}
while (!$docker_running) {
Start-Sleep 5
docker ps 2>&1 | Out-Null
$docker_running = $?
}
Write-Output "Docker is running"
docker system prune -f
- name: Info
shell: powershell
run: |
echo $env:computername
echo "------------------------"
docker info
echo "------------------------"
docker volume ls
echo "------------------------"
docker system info --format '{{json .}}'
echo "------------------------"
- uses: actions/setup-go@v2
with:
go-version: '1.15.5'
stable: true
- name: Install tools
continue-on-error: true
shell: powershell
run: |
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
choco install -y kubernetes-cli
choco install -y jq
choco install -y caffeine
if (Test-Path 'C:\Program Files\Docker\Docker\resources\bin\kubectl.exe') { Remove-Item 'C:\Program Files\Docker\Docker\resources\bin\kubectl.exe' };
- name: Run Integration Test in powershell
shell: powershell
run: |
cd minikube_binaries
New-Item -Force -Path "report" -ItemType Directory
New-Item -Force -Path "testhome" -ItemType Directory
$START_TIME=(GET-DATE)
$env:KUBECONFIG="${pwd}\testhome\kubeconfig"
$env:MINIKUBE_HOME="${pwd}\testhome"
$ErrorActionPreference = "SilentlyContinue"
.\e2e-windows-amd64.exe --minikube-start-args="--driver=docker" --test.timeout=15m --timeout-multiplier=1 --test.v --test.run=TestScheduledStopWindows --binary=./minikube-windows-amd64.exe | Tee-Object -FilePath ".\report\testout.txt"
$END_TIME=(GET-DATE)
echo $END_TIME
$DURATION=(NEW-TIMESPAN -Start $START_TIME -End $END_TIME)
echo $DURATION
$SECS=($DURATION.TotalSeconds)
$MINS=($DURATION.TotalMinutes)
$T_ELAPSED="$MINS m $SECS s"
echo "----"
echo $T_ELAPSED
echo "----"
echo "TIME_ELAPSED=$T_ELAPSED" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
scheduled_stop_hyperv_windows:
needs: [build_minikube]
env:
TIME_ELAPSED: time
JOB_NAME: "scheduled_stop_hyperv_windows"
GOPOGH_RESULT: ""
runs-on: [self-hosted, windows-10-ent, Standard_D16s_v3, hyperv]
steps:
- name: Clean up
continue-on-error: true
shell: powershell
run: |
echo $env:computerName
ls
$ErrorActionPreference = "SilentlyContinue"
cd minikube_binaries
ls
$env:KUBECONFIG="${pwd}\testhome\kubeconfig"
$env:MINIKUBE_HOME="${pwd}\testhome"
.\minikube-windows-amd64.exe delete --all --purge
Get-VM | Where-Object {$_.Name -ne "DockerDesktopVM"} | Foreach {
Stop-VM -Name $_.Name -Force
Remove-VM $_.Name -Force
}
cd ..
Remove-Item minikube_binaries -Force -Recurse
ls
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Start Docker Desktop
shell: powershell
continue-on-error: true
run: |
$ErrorActionPreference = "SilentlyContinue"
docker ps 2>&1 | Out-Null
$docker_running = $?
if (!$docker_running) {
Write-Output "Starting Docker as an administrator"
Start-Process 'C:/Program Files/Docker/Docker/Docker Desktop.exe' -Verb runAs
}
while (!$docker_running) {
Start-Sleep 5
docker ps 2>&1 | Out-Null
$docker_running = $?
}
Write-Output "Docker is running"
docker system prune -f
- name: Info
continue-on-error: true
shell: powershell
run: |
$ErrorActionPreference = "SilentlyContinue"
cd minikube_binaries
ls
echo $env:computername
Get-WmiObject -class Win32_ComputerSystem
- uses: actions/setup-go@v2
with:
go-version: '1.15.5'
stable: true
- name: Install tools
continue-on-error: true
shell: powershell
run: |
$ErrorActionPreference = "SilentlyContinue"
(New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
choco install -y kubernetes-cli
choco install -y jq
choco install -y caffeine
if (Test-Path 'C:\Program Files\Docker\Docker\resources\bin\kubectl.exe') { Remove-Item 'C:\Program Files\Docker\Docker\resources\bin\kubectl.exe' };
- name: Run Integration Test in powershell
shell: powershell
run: |
cd minikube_binaries
New-Item -Force -Path "report" -ItemType Directory
New-Item -Force -Path "testhome" -ItemType Directory
$START_TIME=(GET-DATE)
$env:KUBECONFIG="${pwd}\testhome\kubeconfig"
$env:MINIKUBE_HOME="${pwd}\testhome"
$ErrorActionPreference = "SilentlyContinue"
.\e2e-windows-amd64.exe --minikube-start-args="--driver=hyperv" --test.timeout=20m --timeout-multiplier=1.5 --test.v --test.run=TestScheduledStopWindows --binary=./minikube-windows-amd64.exe | Tee-Object -FilePath ".\report\testout.txt"
$END_TIME=(GET-DATE)
echo $END_TIME
$DURATION=(NEW-TIMESPAN -Start $START_TIME -End $END_TIME)
echo $DURATION
$SECS=($DURATION.TotalSeconds)
$MINS=($DURATION.TotalMinutes)
$T_ELAPSED="$MINS m $SECS s"
echo "----"
echo $T_ELAPSED
echo "----"
echo "TIME_ELAPSED=$T_ELAPSED" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
functional_baremetal_ubuntu18_04:
needs: [build_minikube]
env:
@ -590,7 +782,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -649,6 +841,121 @@ jobs:
if [ "$numPass" -eq 0 ];then echo "*** 0 Passed! ***";exit 2;fi
if [ "$numPass" -lt 26 ];then echo "*** Failed to pass at least 26 ! ***";exit 2;fi
if [ "$numPass" -eq 0 ];then echo "*** Passed! ***";exit 0;fi
functional_docker_ubuntu_arm64:
needs: [ build_minikube ]
runs-on: [ self-hosted, arm64 ]
env:
TIME_ELAPSED: time
JOB_NAME: "functional_docker_ubuntu_arm64"
GOPOGH_RESULT: ""
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/arm64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-arm64
sudo install gopogh-linux-arm64 /usr/local/bin/gopogh
- name: Install tools
shell: bash
run: |
sudo apt update
sudo apt install -y jq docker
- name: Docker Info
shell: bash
run: |
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info --format='{{json .}}'|| true
echo "--------------------------"
docker ps || true
echo "--------------------------"
whoami || true
echo "--------------------------"
hostname || true
echo "--------------------------"
# go 1.14.6+ is needed because of this bug https://github.com/golang/go/issues/39308
- uses: actions/setup-go@v2
with:
go-version: '1.15.2'
stable: true
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: false
# bash {0} to allow test to continue to next step in case of failure
shell: bash {0}
run: |
cd minikube_binaries
mkdir -p report
mkdir -p testhome
chmod a+x e2e-*
chmod a+x minikube-*
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-arm64 -minikube-start-args=--vm-driver=docker -test.run TestFunctional -test.timeout=10m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-arm64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds "
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
- name: Generate HTML Report
shell: bash
run: |
cd minikube_binaries
export PATH=${PATH}:`go env GOPATH`/bin
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
echo 'STAT<<EOF' >> $GITHUB_ENV
echo "${STAT}" >> $GITHUB_ENV
echo 'EOF' >> $GITHUB_ENV
- uses: actions/upload-artifact@v1
with:
name: functional_docker_ubuntu_arm64
path: minikube_binaries/report
- name: The End Result - functional_docker_ubuntu_arm64
shell: bash
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*******************${numPass} Passes :) *******************"
echo $STAT | jq '.PassedTests' || true
echo "*******************************************************"
echo "---------------- ${numFail} Failures :( ----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
if [ "$numPass" -eq 0 ];then echo "*** 0 Passed! ***";exit 2;fi
if [ "$numPass" -lt 0 ];then echo "*** Failed to pass at least 20! ***";exit 2;fi
if [ "$numPass" -eq 0 ];then echo "*** Passed! ***";exit 0;fi
addons_certs_docker_ubuntu:
runs-on: ubuntu-18.04
env:
@ -684,10 +991,9 @@ jobs:
go-version: '1.15.5'
stable: true
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -769,7 +1075,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Install docker
shell: bash
@ -877,11 +1183,11 @@ jobs:
with:
go-version: '1.15.5'
stable: true
- name: Install gopogh
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -965,7 +1271,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -1072,7 +1378,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -1154,7 +1460,7 @@ jobs:
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-darwin-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-darwin-amd64
sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
@ -1229,10 +1535,13 @@ jobs:
needs:
[
functional_docker_ubuntu,
functional_docker_ubuntu_arm64,
functional_virtualbox_macos,
functional_docker_windows,
functional_hyperv_windows,
functional_baremetal_ubuntu18_04,
scheduled_stop_docker_windows,
scheduled_stop_hyperv_windows,
addons_certs_docker_ubuntu,
addons_certs_virtualbox_macos,
multinode_docker_ubuntu,
@ -1251,10 +1560,13 @@ jobs:
mkdir -p all_reports
ls -lah
cp -r ./functional_docker_ubuntu ./all_reports/
cp -r ./functional_docker_ubuntu_arm64 ./all_reports/
cp -r ./functional_virtualbox_macos ./all_reports/
cp -r ./functional_docker_windows ./all_reports/
cp -r ./functional_hyperv_windows ./all_reports/
cp -r ./functional_baremetal_ubuntu18_04 ./all_reports/
cp -r ./scheduled_stop_docker_windows ./all_reports/
cp -r ./scheduled_stop_hyperv_windows ./all_reports/
cp -r ./addons_certs_docker_ubuntu ./all_reports/
cp -r ./addons_certs_virtualbox_macos ./all_reports/
cp -r ./multinode_docker_ubuntu ./all_reports/


@ -1,5 +1,107 @@
# Release Notes
## Version 1.17.1 - 2021-01-28
Features:
* Add new flag --user to log executed commands [#10106](https://github.com/kubernetes/minikube/pull/10106)
* Unhide --schedule flag for scheduled stop [#10274](https://github.com/kubernetes/minikube/pull/10274)
Bugs:
* fixing debian and arch concurrent multiarch builds [#9998](https://github.com/kubernetes/minikube/pull/9998)
* configure the crictl yaml file to avoid the warning [#10221](https://github.com/kubernetes/minikube/pull/10221)
Thank you to our contributors for this release!
- Anders F Björklund
- BLasan
- Ilya Zuyev
- Jiefeng He
- Jorropo
- Medya Ghazizadeh
- Niels de Vos
- Priya Wadhwa
- Sharif Elgamal
- Steven Powell
- Thomas Strömberg
- andrzejsydor
## Version 1.17.0 - 2021-01-22
Features:
* Add multi-arch (arm64) support for docker/podman drivers [#9969](https://github.com/kubernetes/minikube/pull/9969)
* Add new driver "SSH" to bootstrap generic minkube clusters over ssh [#10099](https://github.com/kubernetes/minikube/pull/10099)
* Annotate Kubeconfig with 'Extension' to identify contexts/clusters created by minikube [#10126](https://github.com/kubernetes/minikube/pull/10126)
* Add support for systemd cgroup to containerd runtime [#10100](https://github.com/kubernetes/minikube/pull/10100)
* Add --network flag to select docker network to run with docker driver [#9538](https://github.com/kubernetes/minikube/pull/9538)
Minor Improvements:
* Improve exit codes by splitting PROVIDER_DOCKER_ERROR into more specific reason codes [#10212](https://github.com/kubernetes/minikube/pull/10212)
* Improve warning about the suggested memory size [#10187](https://github.com/kubernetes/minikube/pull/10187)
* Remove systemd dependency from none driver [#10112](https://github.com/kubernetes/minikube/pull/10112)
* Delete the existing cluster if guest driver mismatch [#10084](https://github.com/kubernetes/minikube/pull/10084)
* Remove obsolete 'vmwarefusion' driver, add friendly message [#9958](https://github.com/kubernetes/minikube/pull/9958)
* UI: Add a spinner for `creating container` step [#10024](https://github.com/kubernetes/minikube/pull/10024)
* Added validation for --insecure-registry values [#9977](https://github.com/kubernetes/minikube/pull/9977)
Bug Fixes:
* Snap package manger: fix cert copy issue [#10042](https://github.com/kubernetes/minikube/pull/10042)
* Ignore non-socks5 ALL_PROXY env var when checking docker status [#10109](https://github.com/kubernetes/minikube/pull/10109)
* Docker-env: avoid race condition in bootstrap certs for parallel runs [#10118](https://github.com/kubernetes/minikube/pull/10118)
* Fix 'profile list' for multi-node clusters [#9955](https://github.com/kubernetes/minikube/pull/9955)
* Change metrics-server pull policy to IfNotPresent [#10096](https://github.com/kubernetes/minikube/pull/10096)
* Podman driver: Handle installations without default bridge [#10092](https://github.com/kubernetes/minikube/pull/10092)
* Fix docker inspect network go template for network which doesn't have MTU [#10053](https://github.com/kubernetes/minikube/pull/10053)
* Docker/Podman: add control-plane to NO_PROXY [#10046](https://github.com/kubernetes/minikube/pull/10046)
* "cache add": fix command error when not specifying :latest tag [#10058](https://github.com/kubernetes/minikube/pull/10058)
* Networking: Fix ClusterDomain value in kubeadm KubeletConfiguration [#10049](https://github.com/kubernetes/minikube/pull/10049)
* Fix typo in the csi-hostpath-driver addon name [#10034](https://github.com/kubernetes/minikube/pull/10034)
Upgrades:
* bump default Kubernetes version to v1.20.2 and add v1.20.3-rc.0 [#10194](https://github.com/kubernetes/minikube/pull/10194)
* Upgrade Docker, from 20.10.1 to 20.10.2 [#10154](https://github.com/kubernetes/minikube/pull/10154)
* ISO: Added sch_htb, cls_fw, cls_matchall, act_connmark and ifb kernel modules [#10048](https://github.com/kubernetes/minikube/pull/10048)
* ISO: add XFS_QUOTA support to guest vm [#9999](https://github.com/kubernetes/minikube/pull/9999)
Thank you to our contributors for this release!
- AUT0R3V
- Amar Tumballi
- Anders F Björklund
- Daehyeok Mun
- Eric Briand
- Ilya Zuyev
- Ivan Milchev
- Jituri, Pranav
- Laurent VERDOÏA
- Ling Samuel
- Medya Ghazizadeh
- Oliver Radwell
- Pablo Caderno
- Priya Wadhwa
- Sadlil
- Sharif Elgamal
- Steven Powell
- Thomas Strömberg
- Yanshu Zhao
- alonyb
- anencore94
- cxsu
- zouyu
## Version 1.16.0 - 2020-12-17
* Fix node IP retrieval for none driver [#9986](https://github.com/kubernetes/minikube/pull/9986)
* Remove experimental warning for multinode [#9987](https://github.com/kubernetes/minikube/pull/9987)
* Enable Ingress Addon for Docker Windows [#9761](https://github.com/kubernetes/minikube/pull/9761)

Makefile

@ -14,8 +14,8 @@
# Bump these on release - and please check ISO_VERSION for correctness.
VERSION_MAJOR ?= 1
VERSION_MINOR ?= 16
VERSION_BUILD ?= 0
VERSION_MINOR ?= 17
VERSION_BUILD ?= 1
RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)
VERSION ?= v$(RAW_VERSION)
@ -23,10 +23,13 @@ KUBERNETES_VERSION ?= $(shell egrep "DefaultKubernetesVersion =" pkg/minikube/co
KIC_VERSION ?= $(shell egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f2)
# Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions
ISO_VERSION ?= v1.16.0
ISO_VERSION ?= v1.17.0
# Dashes are valid in semver, but not Linux packaging. Use ~ to delimit alpha/beta
DEB_VERSION ?= $(subst -,~,$(RAW_VERSION))
DEB_REVISION ?= 0
RPM_VERSION ?= $(DEB_VERSION)
RPM_REVISION ?= 0
# used by hack/jenkins/release_build_and_upload.sh and KVM_BUILD_IMAGE, see also BUILD_IMAGE below
GO_VERSION ?= 1.15.5
@ -43,7 +46,14 @@ COMMIT_SHORT = $(shell git rev-parse --short HEAD 2> /dev/null || true)
HYPERKIT_BUILD_IMAGE ?= karalabe/xgo-1.12.x
# NOTE: "latest" as of 2020-05-13. kube-cross images aren't updated as often as Kubernetes
# https://github.com/kubernetes/kubernetes/blob/master/build/build-image/cross/VERSION
BUILD_IMAGE ?= us.gcr.io/k8s-artifacts-prod/build-image/kube-cross:v$(GO_VERSION)-1
#
# TODO: See https://github.com/kubernetes/minikube/issues/10276
#BUILD_IMAGE ?= us.gcr.io/k8s-artifacts-prod/build-image/kube-cross:v$(GO_VERSION)-1
BUILD_IMAGE ?= golang:1.16.0-buster
#
ISO_BUILD_IMAGE ?= $(REGISTRY)/buildroot-image
KVM_BUILD_IMAGE ?= $(REGISTRY)/kvm-build-image:$(GO_VERSION)
@ -98,7 +108,7 @@ STORAGE_PROVISIONER_MANIFEST ?= $(REGISTRY)/storage-provisioner:$(STORAGE_PROVIS
STORAGE_PROVISIONER_IMAGE ?= $(REGISTRY)/storage-provisioner-$(GOARCH):$(STORAGE_PROVISIONER_TAG)
# Set the version information for the Kubernetes servers
MINIKUBE_LDFLAGS := -X k8s.io/minikube/pkg/version.version=$(VERSION) -X k8s.io/minikube/pkg/version.isoVersion=$(ISO_VERSION) -X k8s.io/minikube/pkg/version.isoPath=$(ISO_BUCKET) -X k8s.io/minikube/pkg/version.gitCommitID=$(COMMIT) -X k8s.io/minikube/pkg/version.storageProvisionerVersion=$(STORAGE_PROVISIONER_TAG)
MINIKUBE_LDFLAGS := -X k8s.io/minikube/pkg/version.version=$(VERSION) -X k8s.io/minikube/pkg/version.isoVersion=$(ISO_VERSION) -X k8s.io/minikube/pkg/version.gitCommitID=$(COMMIT) -X k8s.io/minikube/pkg/version.storageProvisionerVersion=$(STORAGE_PROVISIONER_TAG)
PROVISIONER_LDFLAGS := "-X k8s.io/minikube/pkg/storage.version=$(STORAGE_PROVISIONER_TAG) -s -w -extldflags '-static'"
MINIKUBEFILES := ./cmd/minikube/
@ -114,7 +124,7 @@ MARKDOWNLINT ?= markdownlint
MINIKUBE_MARKDOWN_FILES := README.md CONTRIBUTING.md CHANGELOG.md
MINIKUBE_BUILD_TAGS := go_getter_nos3 go_getter_nogcs
MINIKUBE_BUILD_TAGS :=
MINIKUBE_INTEGRATION_BUILD_TAGS := integration $(MINIKUBE_BUILD_TAGS)
CMD_SOURCE_DIRS = cmd pkg
@ -201,10 +211,15 @@ out/minikube-linux-aarch64: out/minikube-linux-arm64
$(if $(quiet),@echo " CP $@")
$(Q)cp $< $@
.PHONY: minikube-linux-amd64 minikube-linux-arm64 minikube-darwin-amd64 minikube-windows-amd64.exe
.PHONY: minikube-linux-amd64 minikube-linux-arm64
minikube-linux-amd64: out/minikube-linux-amd64 ## Build Minikube for Linux 64bit
minikube-linux-arm64: out/minikube-linux-arm64 ## Build Minikube for ARM 64bit
minikube-darwin-amd64: out/minikube-darwin-amd64 ## Build Minikube for Darwin 64bit
minikube-linux-arm64: out/minikube-linux-arm64 ## Build Minikube for arm 64bit
.PHONY: minikube-darwin-amd64 minikube-darwin-arm64
minikube-darwin-amd64: out/minikube-darwin-amd64 ## Build Minikube for Darwin x86 64bit
minikube-darwin-arm64: out/minikube-darwin-arm64 ## Build Minikube for Darwin ARM 64bit
.PHONY: minikube-windows-amd64.exe
minikube-windows-amd64.exe: out/minikube-windows-amd64.exe ## Build Minikube for Windows 64bit
out/minikube-%: $(SOURCE_GENERATED) $(SOURCE_FILES)
@ -216,10 +231,12 @@ else
go build -tags "$(MINIKUBE_BUILD_TAGS)" -ldflags="$(MINIKUBE_LDFLAGS)" -a -o $@ k8s.io/minikube/cmd/minikube
endif
.PHONY: e2e-linux-amd64 e2e-darwin-amd64 e2e-windows-amd64.exe
e2e-linux-amd64: out/e2e-linux-amd64 ## Execute end-to-end testing for Linux 64bit
e2e-darwin-amd64: out/e2e-darwin-amd64 ## Execute end-to-end testing for Darwin 64bit
e2e-windows-amd64.exe: out/e2e-windows-amd64.exe ## Execute end-to-end testing for Windows 64bit
.PHONY: e2e-linux-amd64 e2e-linux-arm64 e2e-darwin-amd64 e2e-windows-amd64.exe
e2e-linux-amd64: out/e2e-linux-amd64 ## build end2end binary for Linux x86 64bit
e2e-linux-arm64: out/e2e-linux-arm64 ## build end2end binary for Linux ARM 64bit
e2e-darwin-amd64: out/e2e-darwin-amd64 ## build end2end binary for Darwin x86 64bit
e2e-darwin-arm64: out/e2e-darwin-arm64 ## build end2end binary for Darwin ARM 64bit
e2e-windows-amd64.exe: out/e2e-windows-amd64.exe ## build end2end binary for Windows 64bit
out/e2e-%: out/minikube-%
GOOS="$(firstword $(subst -, ,$*))" GOARCH="$(lastword $(subst -, ,$(subst $(IS_EXE), ,$*)))" go test -ldflags="${MINIKUBE_LDFLAGS}" -c k8s.io/minikube/test/integration --tags="$(MINIKUBE_INTEGRATION_BUILD_TAGS)" -o $@
@ -266,11 +283,11 @@ iso_in_docker:
--user $(shell id -u):$(shell id -g) --env HOME=/tmp --env IN_DOCKER=1 \
$(ISO_BUILD_IMAGE) /bin/bash
test-iso: pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go
test-iso: $(SOURCE_GENERATED)
go test -v $(INTEGRATION_TESTS_TO_RUN) --tags=iso --minikube-start-args="--iso-url=file://$(shell pwd)/out/buildroot/output/images/rootfs.iso9660"
.PHONY: test-pkg
test-pkg/%: pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go ## Trigger packaging test
test-pkg/%: $(SOURCE_GENERATED) ## Trigger packaging test
go test -v -test.timeout=60m ./$* --tags="$(MINIKUBE_BUILD_TAGS)"
.PHONY: all
@ -320,7 +337,7 @@ else
endif
.PHONY: test
test: pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go ## Trigger minikube test
test: $(SOURCE_GENERATED) ## Trigger minikube test
MINIKUBE_LDFLAGS="${MINIKUBE_LDFLAGS}" ./test.sh
.PHONY: generate-docs
@ -377,7 +394,7 @@ darwin: minikube-darwin-amd64 ## Build minikube for Darwin 64bit
linux: minikube-linux-amd64 ## Build minikube for Linux 64bit
.PHONY: e2e-cross
e2e-cross: e2e-linux-amd64 e2e-darwin-amd64 e2e-windows-amd64.exe ## End-to-end cross test
e2e-cross: e2e-linux-amd64 e2e-linux-arm64 e2e-darwin-amd64 e2e-windows-amd64.exe ## End-to-end cross test
.PHONY: checksum
checksum: ## Generate checksums
@ -396,6 +413,7 @@ clean: ## Clean build
rm -f pkg/minikube/assets/assets.go
rm -f pkg/minikube/translate/translations.go
rm -rf ./vendor
rm -rf /tmp/tmp.*.minikube_*
.PHONY: gendocs
gendocs: out/docs/minikube.md ## Generate documentation
@ -413,8 +431,17 @@ gofmt: ## Run go fmt and list the files differs from gofmt's
vet: ## Run go vet
@go vet $(SOURCE_PACKAGES)
.PHONY: imports
imports: ## Run goimports and modify files in place
@goimports -w $(SOURCE_DIRS)
.PHONY: goimports
goimports: ## Run goimports and list the files differs from goimport's
@goimports -l $(SOURCE_DIRS)
@test -z "`goimports -l $(SOURCE_DIRS)`"
.PHONY: golint
golint: pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go ## Run golint
golint: $(SOURCE_GENERATED) ## Run golint
@golint -set_exit_status $(SOURCE_PACKAGES)
.PHONY: gocyclo
@ -429,17 +456,17 @@ out/linters/golangci-lint-$(GOLINT_VERSION):
# this one is meant for local use
.PHONY: lint
ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
lint: pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go
lint: $(SOURCE_GENERATED)
docker run --rm -v $(pwd):/app -w /app golangci/golangci-lint:$(GOLINT_VERSION) \
golangci-lint run ${GOLINT_OPTIONS} --skip-dirs "cmd/drivers/kvm|cmd/drivers/hyperkit|pkg/drivers/kvm|pkg/drivers/hyperkit" ./...
else
lint: pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go out/linters/golangci-lint-$(GOLINT_VERSION) ## Run lint
lint: $(SOURCE_GENERATED) out/linters/golangci-lint-$(GOLINT_VERSION) ## Run lint
./out/linters/golangci-lint-$(GOLINT_VERSION) run ${GOLINT_OPTIONS} ./...
endif
# lint-ci is slower version of lint and is meant to be used in ci (travis) to avoid out of memory leaks.
.PHONY: lint-ci
lint-ci: pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go out/linters/golangci-lint-$(GOLINT_VERSION) ## Run lint-ci
lint-ci: $(SOURCE_GENERATED) out/linters/golangci-lint-$(GOLINT_VERSION) ## Run lint-ci
GOGC=${GOLINT_GOGC} ./out/linters/golangci-lint-$(GOLINT_VERSION) run \
--concurrency ${GOLINT_JOBS} ${GOLINT_OPTIONS} ./...
@ -457,44 +484,53 @@ mdlint:
verify-iso: # Make sure the current ISO exists in the expected bucket
gsutil stat gs://$(ISO_BUCKET)/minikube-$(ISO_VERSION).iso
out/docs/minikube.md: $(shell find "cmd") $(shell find "pkg/minikube/constants") pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go
out/docs/minikube.md: $(shell find "cmd") $(shell find "pkg/minikube/constants") $(SOURCE_GENERATED)
go run -ldflags="$(MINIKUBE_LDFLAGS)" -tags gendocs hack/help_text/gen_help_text.go
.PHONY: deb_version
deb_version:
@echo $(DEB_VERSION)-$(DEB_REVISION)
.PHONY: deb_version_base
deb_version_base:
@echo $(DEB_VERSION)
out/minikube_$(DEB_VERSION).deb: out/minikube_$(DEB_VERSION)-0_amd64.deb
out/minikube_$(DEB_VERSION).deb: out/minikube_$(DEB_VERSION)-$(DEB_REVISION)_amd64.deb
cp $< $@
out/minikube_$(DEB_VERSION)-0_%.deb: out/minikube-linux-%
cp -r installers/linux/deb/minikube_deb_template out/minikube_$(DEB_VERSION)
chmod 0755 out/minikube_$(DEB_VERSION)/DEBIAN
sed -E -i 's/--VERSION--/'$(DEB_VERSION)'/g' out/minikube_$(DEB_VERSION)/DEBIAN/control
sed -E -i 's/--ARCH--/'$*'/g' out/minikube_$(DEB_VERSION)/DEBIAN/control
out/minikube_$(DEB_VERSION)-$(DEB_REVISION)_%.deb: out/minikube-linux-%
$(eval DEB_PACKAGING_DIRECTORY_$*=$(shell mktemp -d --suffix ".minikube_$(DEB_VERSION)-$*-deb"))
cp -r installers/linux/deb/minikube_deb_template/* $(DEB_PACKAGING_DIRECTORY_$*)/
chmod 0755 $(DEB_PACKAGING_DIRECTORY_$*)/DEBIAN
sed -E -i 's/--VERSION--/'$(DEB_VERSION)'/g' $(DEB_PACKAGING_DIRECTORY_$*)/DEBIAN/control
sed -E -i 's/--ARCH--/'$*'/g' $(DEB_PACKAGING_DIRECTORY_$*)/DEBIAN/control
if [ "$*" = "amd64" ]; then \
sed -E -i 's/--RECOMMENDS--/virtualbox/' out/minikube_$(DEB_VERSION)/DEBIAN/control; \
sed -E -i 's/--RECOMMENDS--/virtualbox/' $(DEB_PACKAGING_DIRECTORY_$*)/DEBIAN/control; \
else \
sed -E -i '/Recommends: --RECOMMENDS--/d' out/minikube_$(DEB_VERSION)/DEBIAN/control; \
sed -E -i '/Recommends: --RECOMMENDS--/d' $(DEB_PACKAGING_DIRECTORY_$*)/DEBIAN/control; \
fi
mkdir -p out/minikube_$(DEB_VERSION)/usr/bin
cp $< out/minikube_$(DEB_VERSION)/usr/bin/minikube
fakeroot dpkg-deb --build out/minikube_$(DEB_VERSION) $@
rm -rf out/minikube_$(DEB_VERSION)
mkdir -p $(DEB_PACKAGING_DIRECTORY_$*)/usr/bin
cp $< $(DEB_PACKAGING_DIRECTORY_$*)/usr/bin/minikube
fakeroot dpkg-deb --build $(DEB_PACKAGING_DIRECTORY_$*) $@
rm -rf $(DEB_PACKAGING_DIRECTORY_$*)
rpm_version:
@echo $(RPM_VERSION)
@echo $(RPM_VERSION)-$(RPM_REVISION)
out/minikube-$(RPM_VERSION).rpm: out/minikube-$(RPM_VERSION)-0.x86_64.rpm
out/minikube-$(RPM_VERSION).rpm: out/minikube-$(RPM_VERSION)-$(RPM_REVISION).x86_64.rpm
cp $< $@
out/minikube-$(RPM_VERSION)-0.%.rpm: out/minikube-linux-%
cp -r installers/linux/rpm/minikube_rpm_template out/minikube-$(RPM_VERSION)
sed -E -i 's/--VERSION--/'$(RPM_VERSION)'/g' out/minikube-$(RPM_VERSION)/minikube.spec
sed -E -i 's|--OUT--|'$(PWD)/out'|g' out/minikube-$(RPM_VERSION)/minikube.spec
$(eval RPM_PACKAGING_DIRECTORY_$*=$(shell mktemp -d --suffix ".minikube_$(RPM_VERSION)-$*-rpm"))
cp -r installers/linux/rpm/minikube_rpm_template/* $(RPM_PACKAGING_DIRECTORY_$*)/
sed -E -i 's/--VERSION--/'$(RPM_VERSION)'/g' $(RPM_PACKAGING_DIRECTORY_$*)/minikube.spec
sed -E -i 's|--OUT--|'$(PWD)/out'|g' $(RPM_PACKAGING_DIRECTORY_$*)/minikube.spec
rpmbuild -bb -D "_rpmdir $(PWD)/out" --target $* \
out/minikube-$(RPM_VERSION)/minikube.spec
@mv out/$*/minikube-$(RPM_VERSION)-0.$*.rpm out/ && rmdir out/$*
rm -rf out/minikube-$(RPM_VERSION)
$(RPM_PACKAGING_DIRECTORY_$*)/minikube.spec
@mv out/$*/minikube-$(RPM_VERSION)-$(RPM_REVISION).$*.rpm out/ && rmdir out/$*
rm -rf $(RPM_PACKAGING_DIRECTORY_$*)
.PHONY: apt
apt: out/Release ## Generate apt package file
@ -566,7 +602,7 @@ release-hyperkit-driver: install-hyperkit-driver checksum ## Copy hyperkit using
gsutil cp $(GOBIN)/docker-machine-driver-hyperkit.sha256 gs://minikube/drivers/hyperkit/$(VERSION)/
.PHONY: check-release
check-release: ## Execute go test
check-release: $(SOURCE_GENERATED) ## Execute go test
go test -v ./deploy/minikube/release_sanity_test.go -tags=release
buildroot-image: $(ISO_BUILD_IMAGE) # convenient alias to build the docker container
@ -594,11 +630,32 @@ storage-provisioner-image: storage-provisioner-image-$(GOARCH) ## Build storage-
storage-provisioner-image-%: out/storage-provisioner-%
docker build -t $(REGISTRY)/storage-provisioner-$*:$(STORAGE_PROVISIONER_TAG) -f deploy/storage-provisioner/Dockerfile --build-arg arch=$* .
.PHONY: kic-base-image
kic-base-image: ## builds the kic base image and tags local/kicbase:latest and local/kicbase:$(KIC_VERSION)-$(COMMIT_SHORT)
docker build -f ./deploy/kicbase/Dockerfile -t local/kicbase:$(KIC_VERSION) --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) --cache-from $(KIC_BASE_IMAGE_GCR) ./deploy/kicbase
docker tag local/kicbase:$(KIC_VERSION) local/kicbase:latest
docker tag local/kicbase:$(KIC_VERSION) local/kicbase:$(KIC_VERSION)-$(COMMIT_SHORT)
X_DOCKER_BUILDER ?= minikube-builder
X_BUILD_ENV ?= DOCKER_CLI_EXPERIMENTAL=enabled
.PHONY: docker-multi-arch-builder
docker-multi-arch-builder:
env $(X_BUILD_ENV) docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
env $(X_BUILD_ENV) docker buildx rm --builder $(X_DOCKER_BUILDER) || true
env $(X_BUILD_ENV) docker buildx create --name $(X_DOCKER_BUILDER) --buildkitd-flags '--debug' || true
KICBASE_ARCH = linux/arm64,linux/amd64
KICBASE_IMAGE_REGISTRIES ?= $(REGISTRY)/kicbase:$(KIC_VERSION) $(REGISTRY_GH)/kicbase:$(KIC_VERSION) kicbase/stable:$(KIC_VERSION)
.PHONY: push-kic-base-image
push-kic-base-image: docker-multi-arch-builder ## Push multi-arch local/kicbase:latest to all remote registries
ifdef AUTOPUSH
docker login gcr.io/k8s-minikube
docker login docker.pkg.github.com
docker login
endif
$(foreach REG,$(KICBASE_IMAGE_REGISTRIES), \
@docker pull $(REG) && echo "Image already exists in registry" && exit 1 || echo "Image doesn't exist in registry";)
ifndef AUTOPUSH
$(call user_confirm, 'Are you sure you want to push $(KICBASE_IMAGE_REGISTRIES) ?')
endif
env $(X_BUILD_ENV) docker buildx build --builder $(X_DOCKER_BUILDER) --platform $(KICBASE_ARCH) $(addprefix -t ,$(KICBASE_IMAGE_REGISTRIES)) --push --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) ./deploy/kicbase
.PHONY: upload-preloaded-images-tar
upload-preloaded-images-tar: out/minikube # Upload the preloaded images for oldest supported, newest supported, and default kubernetes versions to GCS.
@ -630,35 +687,8 @@ ifndef AUTOPUSH
endif
docker push $(IMAGE)
.PHONY: push-kic-base-image-gcr
push-kic-base-image-gcr: kic-base-image ## Push kic-base to gcr
docker login gcr.io/k8s-minikube
docker tag local/kicbase:latest $(KIC_BASE_IMAGE_GCR)
$(MAKE) push-docker IMAGE=$(KIC_BASE_IMAGE_GCR)
.PHONY: push-kic-base-image-gh
push-kic-base-image-gh: kic-base-image ## Push kic-base to github
docker login docker.pkg.github.com
docker tag local/kicbase:latest $(KIC_BASE_IMAGE_GH)
$(MAKE) push-docker IMAGE=$(KIC_BASE_IMAGE_GH)
.PHONY: push-kic-base-image-hub
push-kic-base-image-hub: kic-base-image ## Push kic-base to docker hub
docker login
docker tag local/kicbase:latest $(KIC_BASE_IMAGE_HUB)
$(MAKE) push-docker IMAGE=$(KIC_BASE_IMAGE_HUB)
.PHONY: push-kic-base-image
push-kic-base-image: ## Push local/kicbase:latest to all remote registries
ifndef AUTOPUSH
$(call user_confirm, 'Are you sure you want to push: $(KIC_BASE_IMAGE_GH) & $(KIC_BASE_IMAGE_GCR) & $(KIC_BASE_IMAGE_HUB) ?')
$(MAKE) push-kic-base-image AUTOPUSH=true
else
$(MAKE) push-kic-base-image-gcr push-kic-base-image-hub push-kic-base-image-gh
endif
.PHONY: out/gvisor-addon
out/gvisor-addon: pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go ## Build gvisor addon
out/gvisor-addon: $(SOURCE_GENERATED) ## Build gvisor addon
$(if $(quiet),@echo " GO $@")
$(Q)GOOS=linux CGO_ENABLED=0 go build -o $@ cmd/gvisor/gvisor.go
@ -818,3 +848,12 @@ else
export UPDATE_TARGET="all" && \
go run update_kubernetes_version.go)
endif
.PHONY: stress
stress: ## run the stress tests
go test -test.v -test.timeout=2h ./test/stress -loops=10 | tee "./out/testout_$(COMMIT_SHORT).txt"
.PHONY: update-gopogh-version
update-gopogh-version: ## update gopogh version
(cd hack/update/gopogh_version && \
go run update_gopogh_version.go)

1
OWNERS

@ -18,6 +18,7 @@ approvers:
- medyagh
- josedonizetti
- priyawadhwa
- ilya-zuyev
emeritus_approvers:
- dlorenc
- luxas


@ -1,6 +1,6 @@
# minikube
[![Actions Status](https://github.com/kubernetes/minikube/workflows/Master/badge.svg)](https://github.com/kubernetes/minikube/actions)
[![Actions Status](https://github.com/kubernetes/minikube/workflows/build/badge.svg)](https://github.com/kubernetes/minikube/actions)
[![GoReport Widget]][GoReport Status]
[![Github All Releases](https://img.shields.io/github/downloads/kubernetes/minikube/total.svg)](https://github.com/kubernetes/minikube/releases/latest)
[![Latest Release](https://img.shields.io/github/v/release/kubernetes/minikube?include_prereleases)](https://github.com/kubernetes/minikube/releases/latest)


@ -1,4 +1,4 @@
// +build darwin
// +build darwin,!arm64
/*
Copyright 2016 The Kubernetes Authors All rights reserved.


@ -18,17 +18,25 @@ package cmd
import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/klog/v2"
cmdConfig "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/image"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/node"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/reason"
)
// cacheImageConfigKey is the config field name used to store which images we have previously cached
const cacheImageConfigKey = "cache"
var (
all string
)
// cacheCmd represents the cache command
var cacheCmd = &cobra.Command{
Use: "cache",
@ -42,8 +50,9 @@ var addCacheCmd = &cobra.Command{
Short: "Add an image to local cache.",
Long: "Add an image to local cache.",
Run: func(cmd *cobra.Command, args []string) {
out.WarningT("\"minikube cache\" will be deprecated in upcoming versions, please switch to \"minikube image load\"")
// Cache and load images into docker daemon
if err := machine.CacheAndLoadImages(args); err != nil {
if err := machine.CacheAndLoadImages(args, cacheAddProfiles()); err != nil {
exit.Error(reason.InternalCacheLoad, "Failed to cache and load images", err)
}
// Add images to config file
@ -53,6 +62,26 @@ var addCacheCmd = &cobra.Command{
},
}
func addCacheCmdFlags() {
addCacheCmd.Flags().Bool(all, false, "Add image to cache for all running minikube clusters")
}
func cacheAddProfiles() []*config.Profile {
if viper.GetBool(all) {
validProfiles, _, err := config.ListProfiles() // need to load image to all profiles
if err != nil {
klog.Warningf("error listing profiles: %v", err)
}
return validProfiles
}
profile := viper.GetString(config.ProfileName)
p, err := config.LoadProfile(profile)
if err != nil {
exit.Message(reason.Usage, "{{.profile}} profile is not valid: {{.err}}", out.V{"profile": profile, "err": err})
}
return []*config.Profile{p}
}
// deleteCacheCmd represents the cache delete command
var deleteCacheCmd = &cobra.Command{
Use: "delete",
@ -76,7 +105,7 @@ var reloadCacheCmd = &cobra.Command{
Short: "reload cached images.",
Long: "reloads images previously added using the 'cache add' subcommand",
Run: func(cmd *cobra.Command, args []string) {
err := node.CacheAndLoadImagesInConfig()
err := node.CacheAndLoadImagesInConfig(cacheAddProfiles())
if err != nil {
exit.Error(reason.GuestCacheLoad, "Failed to reload cached images", err)
}
@ -84,6 +113,7 @@ var reloadCacheCmd = &cobra.Command{
}
func init() {
addCacheCmdFlags()
cacheCmd.AddCommand(addCacheCmd)
cacheCmd.AddCommand(deleteCacheCmd)
cacheCmd.AddCommand(reloadCacheCmd)


@ -22,6 +22,7 @@ import (
"regexp"
"github.com/spf13/cobra"
"k8s.io/minikube/pkg/addons"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/mustload"
@ -208,6 +209,11 @@ var addonsConfigureCmd = &cobra.Command{
if err := config.SaveProfile(profile, cfg); err != nil {
out.ErrT(style.Fatal, "Failed to save config {{.profile}}", out.V{"profile": profile})
}
// Re-enable metallb addon in order to generate template manifest files with Load Balancer Start/End IP
if err := addons.EnableOrDisableAddon(cfg, "metallb", "true"); err != nil {
out.ErrT(style.Fatal, "Failed to configure metallb IP {{.profile}}", out.V{"profile": profile})
}
case "ingress":
profile := ClusterFlagValue()
_, cfg := mustload.Partial(profile)


@ -20,7 +20,9 @@ import (
"fmt"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/addons"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/out"
@ -29,9 +31,10 @@ import (
)
var addonsEnableCmd = &cobra.Command{
Use: "enable ADDON_NAME",
Short: "Enables the addon w/ADDON_NAME within minikube (example: minikube addons enable dashboard). For a list of available addons use: minikube addons list ",
Long: "Enables the addon w/ADDON_NAME within minikube (example: minikube addons enable dashboard). For a list of available addons use: minikube addons list ",
Use: "enable ADDON_NAME",
Short: "Enables the addon w/ADDON_NAME within minikube. For a list of available addons use: minikube addons list ",
Long: "Enables the addon w/ADDON_NAME within minikube. For a list of available addons use: minikube addons list ",
Example: "minikube addons enable dashboard",
Run: func(cmd *cobra.Command, args []string) {
if len(args) != 1 {
exit.Message(reason.Usage, "usage: minikube addons enable ADDON_NAME")
@ -42,6 +45,8 @@ var addonsEnableCmd = &cobra.Command{
out.Step(style.Waiting, "enable metrics-server addon instead of heapster addon because heapster is deprecated")
addon = "metrics-server"
}
viper.Set(config.AddonImages, images)
viper.Set(config.AddonRegistries, registries)
err := addons.SetAndSave(ClusterFlagValue(), addon, "true")
if err != nil {
exit.Error(reason.InternalEnable, "enable failed", err)
@ -63,6 +68,13 @@ var addonsEnableCmd = &cobra.Command{
},
}
var (
images string
registries string
)
func init() {
addonsEnableCmd.Flags().StringVar(&images, "images", "", "Images used by this addon. Separated by commas.")
addonsEnableCmd.Flags().StringVar(&registries, "registries", "", "Registries used by this addon. Separated by commas.")
AddonsCmd.AddCommand(addonsEnableCmd)
}


@ -0,0 +1,70 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"os"
"github.com/olekukonko/tablewriter"
"github.com/spf13/cobra"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/reason"
)
var addonsImagesCmd = &cobra.Command{
Use: "images ADDON_NAME",
Short: "List image names used by the addon w/ADDON_NAME. For a list of available addons use: minikube addons list",
Long: "List image names used by the addon w/ADDON_NAME. For a list of available addons use: minikube addons list",
Example: "minikube addons images ingress",
Run: func(cmd *cobra.Command, args []string) {
if len(args) != 1 {
exit.Message(reason.Usage, "usage: minikube addons images ADDON_NAME")
}
addon := args[0]
// look up the addon and print the images it uses
if conf, ok := assets.Addons[addon]; ok {
if conf.Images != nil {
out.Infof("{{.name}} has following images:", out.V{"name": addon})
var tData [][]string
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Image Name", "Default Image", "Default Registry"})
table.SetAutoFormatHeaders(true)
table.SetBorders(tablewriter.Border{Left: true, Top: true, Right: true, Bottom: true})
table.SetCenterSeparator("|")
for imageName, defaultImage := range conf.Images {
tData = append(tData, []string{imageName, defaultImage, conf.Registries[imageName]})
}
table.AppendBulk(tData)
table.Render()
} else {
out.Infof("{{.name}} doesn't have images.", out.V{"name": addon})
}
} else {
out.FailureT("No such addon {{.name}}", out.V{"name": addon})
}
},
}
func init() {
AddonsCmd.AddCommand(addonsImagesCmd)
}


@ -41,6 +41,7 @@ import (
)
var output string
var isLight bool
var profileListCmd = &cobra.Command{
Use: "list",
@ -58,8 +59,18 @@ var profileListCmd = &cobra.Command{
},
}
func listProfiles() (validProfiles, invalidProfiles []*config.Profile, err error) {
if isLight {
validProfiles, err = config.ListValidProfiles()
} else {
validProfiles, invalidProfiles, err = config.ListProfiles()
}
return validProfiles, invalidProfiles, err
}
func printProfilesTable() {
validProfiles, invalidProfiles, err := config.ListProfiles()
validProfiles, invalidProfiles, err := listProfiles()
if err != nil {
klog.Warningf("error loading profiles: %v", err)
@ -75,6 +86,13 @@ func printProfilesTable() {
}
func updateProfilesStatus(profiles []*config.Profile) {
if isLight {
for _, p := range profiles {
p.Status = "Skipped"
}
return
}
api, err := machine.NewAPIClient()
if err != nil {
klog.Errorf("failed to get machine api client %v", err)
@ -92,7 +110,7 @@ func profileStatus(p *config.Profile, api libmachine.API) string {
exit.Error(reason.GuestCpConfig, "error getting primary control plane", err)
}
host, err := machine.LoadHost(api, driver.MachineName(*p.Config, cp))
host, err := machine.LoadHost(api, config.MachineName(*p.Config, cp))
if err != nil {
klog.Warningf("error loading profiles: %v", err)
return "Unknown"
@ -168,7 +186,7 @@ func warnInvalidProfiles(invalidProfiles []*config.Profile) {
}
func printProfilesJSON() {
validProfiles, invalidProfiles, err := config.ListProfiles()
validProfiles, invalidProfiles, err := listProfiles()
updateProfilesStatus(validProfiles)
@ -195,5 +213,6 @@ func profilesOrDefault(profiles []*config.Profile) []*config.Profile {
func init() {
profileListCmd.Flags().StringVarP(&output, "output", "o", "table", "The output format. One of 'json', 'table'")
profileListCmd.Flags().BoolVarP(&isLight, "light", "l", false, "If true, returns list of profiles faster by skipping validating the status of the cluster.")
ProfileCmd.AddCommand(profileListCmd)
}
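
Many hunks in this diff replace driver.MachineName with config.MachineName when computing per-node machine names. A minimal sketch of what such a helper might look like, assuming the control-plane node keeps the profile name and additional nodes get a node-name suffix such as "-m02"; this is an illustration, not the actual minikube implementation:

package config

import "fmt"

// Node and ClusterConfig stand in for the minikube config types; only the
// fields needed for this sketch are included.
type Node struct {
	Name         string // e.g. "m02"
	ControlPlane bool
}

type ClusterConfig struct {
	Name  string // profile name, e.g. "minikube"
	Nodes []Node
}

// MachineName maps a (cluster, node) pair to a libmachine host name.
// Assumed behavior: single-node clusters and control-plane nodes reuse the
// profile name; additional nodes are suffixed, e.g. "minikube-m02".
func MachineName(cc ClusterConfig, n Node) string {
	if len(cc.Nodes) == 1 || n.ControlPlane {
		return cc.Name
	}
	return fmt.Sprintf("%s-%s", cc.Name, n.Name)
}

Under that assumption, deleting or stopping node "m02" of profile "multinode" operates on the host named "multinode-m02".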


@ -17,12 +17,14 @@ limitations under the License.
package cmd
import (
"context"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"time"
"github.com/docker/machine/libmachine/mcnerror"
"github.com/mitchellh/go-ps"
@ -94,7 +96,7 @@ func init() {
}
// shotgun cleanup to delete orphaned docker container data
func deleteContainersAndVolumes(ociBin string) {
func deleteContainersAndVolumes(ctx context.Context, ociBin string) {
if _, err := exec.LookPath(ociBin); err != nil {
klog.Infof("skipping deleteContainersAndVolumes for %s: %v", ociBin, err)
return
@ -108,7 +110,7 @@ func deleteContainersAndVolumes(ociBin string) {
klog.Infof("error delete containers by label %q (might be okay): %+v", delLabel, errs)
}
errs = oci.DeleteAllVolumesByLabel(ociBin, delLabel)
errs = oci.DeleteAllVolumesByLabel(ctx, ociBin, delLabel)
if len(errs) > 0 { // it will not error if there is nothing to delete
klog.Warningf("error delete volumes by label %q (might be okay): %+v", delLabel, errs)
}
@ -118,7 +120,7 @@ func deleteContainersAndVolumes(ociBin string) {
return
}
errs = oci.PruneAllVolumesByLabel(ociBin, delLabel)
errs = oci.PruneAllVolumesByLabel(ctx, ociBin, delLabel)
if len(errs) > 0 { // it will not error if there is nothing to delete
klog.Warningf("error pruning volumes by label %q (might be okay): %+v", delLabel, errs)
}
@ -146,10 +148,12 @@ func runDelete(cmd *cobra.Command, args []string) {
}
exit.Message(reason.Usage, "Usage: minikube delete --all --purge")
}
delCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
if deleteAll {
deleteContainersAndVolumes(oci.Docker)
deleteContainersAndVolumes(oci.Podman)
deleteContainersAndVolumes(delCtx, oci.Docker)
deleteContainersAndVolumes(delCtx, oci.Podman)
errs := DeleteProfiles(profilesToDelete)
register.Reg.SetStep(register.Done)
@ -182,8 +186,8 @@ func runDelete(cmd *cobra.Command, args []string) {
if orphan {
// TODO: generalize for non-KIC drivers: #8040
deletePossibleKicLeftOver(cname, driver.Docker)
deletePossibleKicLeftOver(cname, driver.Podman)
deletePossibleKicLeftOver(delCtx, cname, driver.Docker)
deletePossibleKicLeftOver(delCtx, cname, driver.Podman)
}
}
@ -206,7 +210,9 @@ func DeleteProfiles(profiles []*config.Profile) []error {
klog.Infof("DeleteProfiles")
var errs []error
for _, profile := range profiles {
err := deleteProfile(profile)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
err := deleteProfile(ctx, profile)
if err != nil {
mm, loadErr := machine.LoadMachine(profile.Name)
@ -224,7 +230,7 @@ func DeleteProfiles(profiles []*config.Profile) []error {
}
// TODO: remove and/or move to delete package: #8040
func deletePossibleKicLeftOver(cname string, driverName string) {
func deletePossibleKicLeftOver(ctx context.Context, cname string, driverName string) {
bin := ""
switch driverName {
case driver.Docker:
@ -241,13 +247,12 @@ func deletePossibleKicLeftOver(cname string, driverName string) {
}
klog.Infof("deleting possible KIC leftovers for %s (driver=%s) ...", cname, driverName)
delLabel := fmt.Sprintf("%s=%s", oci.ProfileLabelKey, cname)
cs, err := oci.ListContainersByLabel(bin, delLabel)
cs, err := oci.ListContainersByLabel(ctx, bin, delLabel)
if err == nil && len(cs) > 0 {
for _, c := range cs {
out.Step(style.DeletingHost, `Deleting container "{{.name}}" ...`, out.V{"name": cname})
err := oci.DeleteContainer(bin, c)
err := oci.DeleteContainer(ctx, bin, c)
if err != nil { // it will error if there is no container to delete
klog.Errorf("error deleting container %q. You may want to delete it manually :\n%v", cname, err)
}
@ -255,11 +260,27 @@ func deletePossibleKicLeftOver(cname string, driverName string) {
}
}
errs := oci.DeleteAllVolumesByLabel(bin, delLabel)
if bin == oci.Podman {
// podman volume does not support --filter
err := oci.RemoveVolume(bin, cname)
if err != nil {
klog.Warningf("error deleting volume %s (might be okay).'\n:%v", cname, err)
}
}
errs := oci.DeleteAllVolumesByLabel(ctx, bin, delLabel)
if errs != nil { // it will not error if there is nothing to delete
klog.Warningf("error deleting volumes (might be okay).\nTo see the list of volumes run: 'docker volume ls'\n:%v", errs)
}
if bin == oci.Podman {
// podman network does not support --filter
err := oci.RemoveNetwork(bin, cname)
if err != nil {
klog.Warningf("error deleting network %s (might be okay).'\n:%v", cname, err)
}
}
errs = oci.DeleteKICNetworks(bin)
if errs != nil {
klog.Warningf("error deleting leftover networks (might be okay).\nTo see the list of networks: 'docker network ls'\n:%v", errs)
@ -270,13 +291,13 @@ func deletePossibleKicLeftOver(cname string, driverName string) {
return
}
errs = oci.PruneAllVolumesByLabel(bin, delLabel)
errs = oci.PruneAllVolumesByLabel(ctx, bin, delLabel)
if len(errs) > 0 { // it will not error if there is nothing to delete
klog.Warningf("error pruning volume (might be okay):\n%v", errs)
}
}
func deleteProfile(profile *config.Profile) error {
func deleteProfile(ctx context.Context, profile *config.Profile) error {
klog.Infof("Deleting %s", profile.Name)
register.Reg.SetStep(register.Deleting)
@ -288,8 +309,8 @@ func deleteProfile(profile *config.Profile) error {
if driver.IsKIC(profile.Config.Driver) {
out.Step(style.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": profile.Name, "driver_name": profile.Config.Driver})
for _, n := range profile.Config.Nodes {
machineName := driver.MachineName(*profile.Config, n)
deletePossibleKicLeftOver(machineName, profile.Config.Driver)
machineName := config.MachineName(*profile.Config, n)
deletePossibleKicLeftOver(ctx, machineName, profile.Config.Driver)
}
}
} else {
@ -309,7 +330,7 @@ func deleteProfile(profile *config.Profile) error {
return DeletionError{Err: delErr, Errtype: MissingProfile}
}
if err == nil && driver.BareMetal(cc.Driver) {
if err == nil && (driver.BareMetal(cc.Driver) || driver.IsSSH(cc.Driver)) {
if err := uninstallKubernetes(api, *cc, cc.Nodes[0], viper.GetString(cmdcfg.Bootstrapper)); err != nil {
deletionError, ok := err.(DeletionError)
if ok {
@ -347,7 +368,7 @@ func deleteHosts(api libmachine.API, cc *config.ClusterConfig) {
if cc != nil {
for _, n := range cc.Nodes {
machineName := driver.MachineName(*cc, n)
machineName := config.MachineName(*cc, n)
if err := machine.DeleteHost(api, machineName); err != nil {
switch errors.Cause(err).(type) {
case mcnerror.ErrHostDoesNotExist:
@ -412,7 +433,7 @@ func profileDeletionErr(cname string, additionalInfo string) error {
func uninstallKubernetes(api libmachine.API, cc config.ClusterConfig, n config.Node, bsName string) error {
out.Step(style.Resetting, "Uninstalling Kubernetes {{.kubernetes_version}} using {{.bootstrapper_name}} ...", out.V{"kubernetes_version": cc.KubernetesConfig.KubernetesVersion, "bootstrapper_name": bsName})
host, err := machine.LoadHost(api, driver.MachineName(cc, n))
host, err := machine.LoadHost(api, config.MachineName(cc, n))
if err != nil {
return DeletionError{Err: fmt.Errorf("unable to load host: %v", err), Errtype: MissingCluster}
}
@ -500,7 +521,7 @@ func deleteProfileDirectory(profile string) {
func deleteMachineDirectories(cc *config.ClusterConfig) {
if cc != nil {
for _, n := range cc.Nodes {
machineName := driver.MachineName(*cc, n)
machineName := config.MachineName(*cc, n)
deleteProfileDirectory(machineName)
}
}


@ -23,6 +23,7 @@ import (
"fmt"
"io"
"net"
"net/url"
"os"
"os/exec"
"strconv"
@ -455,9 +456,41 @@ func dockerEnvVarsList(ec DockerEnvConfig) []string {
}
}
func isValidDockerProxy(env string) bool {
val := os.Getenv(env)
if val == "" {
return true
}
u, err := url.Parse(val)
if err != nil {
klog.Warningf("Parsing proxy env variable %s=%s error: %v", env, val, err)
return false
}
switch u.Scheme {
// See moby/moby#25740
case "socks5", "socks5h":
return true
default:
return false
}
}
func removeInvalidDockerProxy() {
for _, env := range []string{"ALL_PROXY", "all_proxy"} {
if !isValidDockerProxy(env) {
klog.Warningf("Ignoring non socks5 proxy env variable %s=%s", env, os.Getenv(env))
os.Unsetenv(env)
}
}
}
// tryDockerConnectivity tries to connect to the docker env from the user's point of view to detect whether it needs a reset
func tryDockerConnectivity(bin string, ec DockerEnvConfig) ([]byte, error) {
c := exec.Command(bin, "version", "--format={{.Server}}")
// See #10098 for details
removeInvalidDockerProxy()
c.Env = append(os.Environ(), dockerEnvVarsList(ec)...)
klog.Infof("Testing Docker connectivity with: %v", c)
return c.CombinedOutput()


@ -18,6 +18,7 @@ package cmd
import (
"bytes"
"os"
"testing"
"github.com/google/go-cmp/cmp"
@ -306,3 +307,37 @@ MINIKUBE_ACTIVE_DOCKERD
})
}
}
func TestValidDockerProxy(t *testing.T) {
var tests = []struct {
proxy string
isValid bool
}{
{
proxy: "socks5://192.168.0.1:1080",
isValid: true,
},
{
proxy: "",
isValid: true,
},
{
proxy: "socks://192.168.0.1:1080",
isValid: false,
},
{
proxy: "http://192.168.0.1:1080",
isValid: false,
},
}
for _, tc := range tests {
os.Setenv("ALL_PROXY", tc.proxy)
valid := isValidDockerProxy("ALL_PROXY")
if tc.isValid && valid != tc.isValid {
t.Errorf("Expect %#v to be valid docker proxy", tc.proxy)
} else if !tc.isValid && valid != tc.isValid {
t.Errorf("Expect %#v to be invalid docker proxy", tc.proxy)
}
}
}

58
cmd/minikube/cmd/image.go Normal file

@ -0,0 +1,58 @@
/*
Copyright 2017 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/reason"
)
// imageCmd represents the image command
var imageCmd = &cobra.Command{
Use: "image",
Short: "Load a local image into minikube",
Long: "Load a local image into minikube",
}
// loadImageCmd represents the image load command
var loadImageCmd = &cobra.Command{
Use: "load",
Short: "Load a local image into minikube",
Long: "Load a local image into minikube",
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
exit.Message(reason.Usage, "Please provide an image in your local daemon to load into minikube via <minikube image load IMAGE_NAME>")
}
// Cache and load images into docker daemon
profile, err := config.LoadProfile(viper.GetString(config.ProfileName))
if err != nil {
exit.Error(reason.Usage, "loading profile", err)
}
img := args[0]
if err := machine.CacheAndLoadImages([]string{img}, []*config.Profile{profile}); err != nil {
exit.Error(reason.GuestImageLoad, "Failed to load image", err)
}
},
}
func init() {
imageCmd.AddCommand(loadImageCmd)
}


@ -17,7 +17,11 @@ limitations under the License.
package cmd
import (
"context"
"time"
"github.com/spf13/cobra"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/mustload"
@ -46,8 +50,10 @@ var nodeDeleteCmd = &cobra.Command{
}
if driver.IsKIC(co.Config.Driver) {
machineName := driver.MachineName(*co.Config, *n)
deletePossibleKicLeftOver(machineName, co.Config.Driver)
machineName := config.MachineName(*co.Config, *n)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
deletePossibleKicLeftOver(ctx, machineName, co.Config.Driver)
}
out.Step(style.Deleted, "Node {{.name}} was successfully deleted.", out.V{"name": name})


@ -22,7 +22,7 @@ import (
"github.com/spf13/cobra"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/reason"
@ -47,7 +47,7 @@ var nodeListCmd = &cobra.Command{
}
for _, n := range cc.Nodes {
machineName := driver.MachineName(*cc, n)
machineName := config.MachineName(*cc, n)
fmt.Printf("%s\t%s\n", machineName, n.IP)
}
os.Exit(0)


@ -21,7 +21,7 @@ import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/mustload"
@ -49,7 +49,7 @@ var nodeStartCmd = &cobra.Command{
exit.Error(reason.GuestNodeRetrieve, "retrieving node", err)
}
machineName := driver.MachineName(*cc, *n)
machineName := config.MachineName(*cc, *n)
if machine.IsRunning(api, machineName) {
out.Step(style.Check, "{{.name}} is already running", out.V{"name": name})
os.Exit(0)


@ -18,7 +18,7 @@ package cmd
import (
"github.com/spf13/cobra"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/mustload"
@ -45,7 +45,7 @@ var nodeStopCmd = &cobra.Command{
exit.Error(reason.GuestNodeRetrieve, "retrieving node", err)
}
machineName := driver.MachineName(*cc, *n)
machineName := config.MachineName(*cc, *n)
err = machine.StopHost(api, machineName)
if err != nil {


@ -24,9 +24,9 @@ import (
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/cluster"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/machine"
@ -73,7 +73,7 @@ func runPause(cmd *cobra.Command, args []string) {
out.Step(style.Pause, "Pausing node {{.name}} ... ", out.V{"name": name})
host, err := machine.LoadHost(co.API, driver.MachineName(*co.Config, n))
host, err := machine.LoadHost(co.API, config.MachineName(*co.Config, n))
if err != nil {
exit.Error(reason.GuestLoadHost, "Error getting host", err)
}


@ -23,6 +23,7 @@ import (
"path/filepath"
"runtime"
"strings"
"time"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
@ -31,10 +32,13 @@ import (
"k8s.io/kubectl/pkg/util/templates"
configCmd "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/minikube/audit"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/reason"
"k8s.io/minikube/pkg/minikube/translate"
)
@ -62,12 +66,33 @@ var RootCmd = &cobra.Command{
exit.Error(reason.HostHomeMkdir, "Error creating minikube directory", err)
}
}
userName := viper.GetString(config.UserFlag)
if !validateUsername(userName) {
out.WarningT("User name '{{.username}}' is not valid", out.V{"username": userName})
exit.Message(reason.Usage, "User name must be 60 chars or less.")
}
},
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
defer audit.Log(time.Now())
// Check whether this is a windows binary (.exe) running inside WSL.
if runtime.GOOS == "windows" && driver.IsMicrosoftWSL() {
var found = false
for _, a := range os.Args {
if a == "--force" {
found = true
break
}
}
if !found {
exit.Message(reason.WrongBinaryWSL, "You are trying to run a Windows .exe binary inside WSL. For better integration, please use the Linux binary instead (download at https://minikube.sigs.k8s.io/docs/start/). If you still want to do this, you can do it using --force")
}
}
_, callingCmd := filepath.Split(os.Args[0])
if callingCmd == "kubectl" {
@ -89,6 +114,7 @@ func Execute() {
os.Args = append([]string{RootCmd.Use, callingCmd, "--"}, os.Args[1:]...)
}
}
for _, c := range RootCmd.Commands() {
c.Short = translate.T(c.Short)
c.Long = translate.T(c.Long)
@ -170,6 +196,7 @@ func init() {
RootCmd.PersistentFlags().StringP(config.ProfileName, "p", constants.DefaultClusterName, `The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently.`)
RootCmd.PersistentFlags().StringP(configCmd.Bootstrapper, "b", "kubeadm", "The name of the cluster bootstrapper that will set up the Kubernetes cluster.")
RootCmd.PersistentFlags().String(config.UserFlag, "", "Specifies the user executing the operation. Useful for auditing operations executed by 3rd party tools. Defaults to the operating system username.")
groups := templates.CommandGroups{
{
@ -190,6 +217,7 @@ func init() {
dockerEnvCmd,
podmanEnvCmd,
cacheCmd,
imageCmd,
},
},
{
@ -280,3 +308,7 @@ func addToPath(dir string) {
klog.Infof("Updating PATH: %s", dir)
os.Setenv("PATH", new)
}
func validateUsername(name string) bool {
return len(name) <= 60
}


@ -20,7 +20,7 @@ import (
"path/filepath"
"github.com/spf13/cobra"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/mustload"
@ -41,7 +41,7 @@ var sshKeyCmd = &cobra.Command{
exit.Error(reason.GuestNodeRetrieve, "retrieving node", err)
}
out.Ln(filepath.Join(localpath.MiniPath(), "machines", driver.MachineName(*cc, *n), "id_rsa"))
out.Ln(filepath.Join(localpath.MiniPath(), "machines", config.MachineName(*cc, *n), "id_rsa"))
},
}


@ -17,6 +17,7 @@ limitations under the License.
package cmd
import (
"context"
"encoding/json"
"fmt"
"math"
@ -35,8 +36,8 @@ import (
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/pkg/errors"
"github.com/shirou/gopsutil/cpu"
gopshost "github.com/shirou/gopsutil/host"
"github.com/shirou/gopsutil/v3/cpu"
gopshost "github.com/shirou/gopsutil/v3/host"
"github.com/spf13/cobra"
"github.com/spf13/viper"
@ -130,7 +131,7 @@ func platform() string {
// runStart handles the flow of "minikube start"
func runStart(cmd *cobra.Command, args []string) {
register.SetEventLogPath(localpath.EventLog(ClusterFlagValue()))
ctx := context.Background()
out.SetJSON(outputFormat == "json")
if err := pkgtrace.Initialize(viper.GetString(trace)); err != nil {
exit.Message(reason.Usage, "error initializing tracing: {{.Error}}", out.V{"Error": err.Error()})
@ -176,6 +177,8 @@ func runStart(cmd *cobra.Command, args []string) {
if existing != nil {
upgradeExistingConfig(existing)
} else {
validateProfileName()
}
validateSpecifiedDriver(existing)
@ -217,7 +220,7 @@ func runStart(cmd *cobra.Command, args []string) {
klog.Warningf("%s profile does not exist, trying anyways.", ClusterFlagValue())
}
err = deleteProfile(profile)
err = deleteProfile(ctx, profile)
if err != nil {
out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": ClusterFlagValue()})
}
@ -250,17 +253,6 @@ func runStart(cmd *cobra.Command, args []string) {
})
}
}
if existing.KubernetesConfig.ContainerRuntime == "crio" {
// Stop and start again if it's crio because it's broken above v1.17.3
out.WarningT("Due to issues with CRI-O post v1.17.3, we need to restart your cluster.")
out.WarningT("See details at https://github.com/kubernetes/minikube/issues/8861")
stopProfile(existing.Name)
starter, err = provisionWithDriver(cmd, ds, existing)
if err != nil {
exitGuestProvision(err)
}
}
}
kubeconfig, err := startWithDriver(cmd, starter, existing)
@ -306,7 +298,7 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing *
os.Exit(0)
}
if driver.IsVM(driverName) {
if driver.IsVM(driverName) && !driver.IsSSH(driverName) {
url, err := download.ISO(viper.GetStringSlice(isoURL), cmd.Flags().Changed(isoURL))
if err != nil {
return node.Starter{}, errors.Wrap(err, "Failed to cache ISO")
@ -480,7 +472,7 @@ func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n co
out.ErrT(style.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": existing.Name})
}
err = deleteProfile(profile)
err = deleteProfile(context.Background(), profile)
if err != nil {
out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": existing.Name})
}
@ -571,7 +563,7 @@ func selectDriver(existing *config.ClusterConfig) (registry.DriverState, []regis
}
ds := driver.Status(d)
if ds.Name == "" {
exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS})
exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}", out.V{"driver": d, "os": runtime.GOOS, "arch": runtime.GOARCH})
}
out.Step(style.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()})
return ds, nil, true
@ -581,7 +573,7 @@ func selectDriver(existing *config.ClusterConfig) (registry.DriverState, []regis
if d := viper.GetString("vm-driver"); d != "" {
ds := driver.Status(viper.GetString("vm-driver"))
if ds.Name == "" {
exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS})
exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}", out.V{"driver": d, "os": runtime.GOOS, "arch": runtime.GOARCH})
}
out.Step(style.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()})
return ds, nil, true
@ -625,7 +617,7 @@ func hostDriver(existing *config.ClusterConfig) string {
klog.Warningf("Unable to get control plane from existing config: %v", err)
return existing.Driver
}
machineName := driver.MachineName(*existing, cp)
machineName := config.MachineName(*existing, cp)
h, err := api.Load(machineName)
if err != nil {
klog.Warningf("api.Load failed for %s: %v", machineName, err)
@ -638,6 +630,25 @@ func hostDriver(existing *config.ClusterConfig) string {
return h.Driver.DriverName()
}
// validateProfileName makes sure that the new profile name is not a duplicate of any machine name in existing multi-node clusters.
func validateProfileName() {
profiles, err := config.ListValidProfiles()
if err != nil {
exit.Message(reason.InternalListConfig, "Unable to list profiles: {{.error}}", out.V{"error": err})
}
for _, p := range profiles {
for _, n := range p.Config.Nodes {
machineName := config.MachineName(*p.Config, n)
if ClusterFlagValue() == machineName {
out.WarningT("Profile name '{{.name}}' is duplicated with machine name '{{.machine}}' in profile '{{.profile}}'", out.V{"name": ClusterFlagValue(),
"machine": machineName,
"profile": p.Name})
exit.Message(reason.Usage, "Profile name should be unique")
}
}
}
}
// validateSpecifiedDriver makes sure that if a user has passed in a driver
// it matches the existing cluster if there is one
func validateSpecifiedDriver(existing *config.ClusterConfig) {
@ -668,6 +679,20 @@ func validateSpecifiedDriver(existing *config.ClusterConfig) {
return
}
out.WarningT("Deleting existing cluster {{.name}} with different driver {{.driver_name}} due to --delete-on-failure flag set by the user. ", out.V{"name": existing.Name, "driver_name": old})
if viper.GetBool(deleteOnFailure) {
// Start failed, delete the cluster
profile, err := config.LoadProfile(existing.Name)
if err != nil {
out.ErrT(style.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": existing.Name})
}
err = deleteProfile(context.Background(), profile)
if err != nil {
out.WarningT("Failed to delete cluster {{.name}}.", out.V{"name": existing.Name})
}
}
exit.Advice(
reason.GuestDrvMismatch,
`The existing "{{.name}}" cluster was created using the "{{.old}}" driver, which is incompatible with requested "{{.new}}" driver.`,
@ -687,7 +712,7 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
name := ds.Name
klog.Infof("validating driver %q against %+v", name, existing)
if !driver.Supported(name) {
exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS})
exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}", out.V{"driver": name, "os": runtime.GOOS, "arch": runtime.GOARCH})
}
// if we are only downloading artifacts for a driver, we can stop validation here
@ -726,7 +751,11 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
}, `The '{{.driver}}' provider was not found: {{.error}}`, out.V{"driver": name, "error": st.Error})
}
id := fmt.Sprintf("PROVIDER_%s_ERROR", strings.ToUpper(name))
id := st.Reason
if id == "" {
id = fmt.Sprintf("PROVIDER_%s_ERROR", strings.ToUpper(name))
}
code := reason.ExProviderUnavailable
if !st.Running {
@ -837,7 +866,7 @@ func validateUser(drvName string) {
// memoryLimits returns the amount of memory allocated to the system and hypervisor, the return value is in MiB
func memoryLimits(drvName string) (int, int, error) {
info, cpuErr, memErr, diskErr := machine.CachedHostInfo()
info, cpuErr, memErr, diskErr := machine.LocalHostInfo()
if cpuErr != nil {
klog.Warningf("could not get system cpu info while verifying memory limits, which might be okay: %v", cpuErr)
}
@ -956,13 +985,13 @@ func validateRequestedMemorySize(req int, drvName string) {
}
}
// validateCPUCount validates the cpu count matches the minimum recommended
// validateCPUCount validates that the cpu count meets the minimum recommended and does not exceed the available cpu count
func validateCPUCount(drvName string) {
var cpuCount int
if driver.BareMetal(drvName) {
// Uses the gopsutil cpu package to count the number of physical cpu cores
ci, err := cpu.Counts(false)
// Uses the gopsutil cpu package to count the number of logical cpu cores
ci, err := cpu.Counts(true)
if err != nil {
klog.Warningf("Unable to get CPU info: %v", err)
} else {
@ -990,6 +1019,22 @@ func validateCPUCount(drvName string) {
}
if si.CPUs < cpuCount {
if driver.IsDockerDesktop(drvName) {
out.Step(style.Empty, `- Ensure your {{.driver_name}} daemon has access to enough CPU/memory resources.`, out.V{"driver_name": drvName})
if runtime.GOOS == "darwin" {
out.Step(style.Empty, `- Docs https://docs.docker.com/docker-for-mac/#resources`, out.V{"driver_name": drvName})
}
if runtime.GOOS == "windows" {
out.String("\n\t")
out.Step(style.Empty, `- Docs https://docs.docker.com/docker-for-windows/#resources`, out.V{"driver_name": drvName})
}
}
exitIfNotForced(reason.RsrcInsufficientCores, "Requested cpu count {{.requested_cpus}} is greater than the available cpus of {{.avail_cpus}}", out.V{"requested_cpus": cpuCount, "avail_cpus": si.CPUs})
}
// looks good
if si.CPUs >= 2 {
return
@ -1079,6 +1124,20 @@ func validateFlags(cmd *cobra.Command, drvName string) {
}
}
if driver.IsSSH(drvName) {
sshIPAddress := viper.GetString(sshIPAddress)
if sshIPAddress == "" {
exit.Message(reason.Usage, "No IP address provided. Try specifying --ssh-ip-address, or see https://minikube.sigs.k8s.io/docs/drivers/ssh/")
}
if net.ParseIP(sshIPAddress) == nil {
_, err := net.LookupIP(sshIPAddress)
if err != nil {
exit.Error(reason.Usage, "Could not resolve IP address", err)
}
}
}
// validate kubeadm extra args
if invalidOpts := bsutil.FindInvalidExtraConfigFlags(config.ExtraOptions); len(invalidOpts) > 0 {
out.WarningT(


@ -95,7 +95,7 @@ const (
waitTimeout = "wait-timeout"
nativeSSH = "native-ssh"
minUsableMem = 1800 // Kubernetes (kubeadm) will not start with less
minRecommendedMem = 2000 // Warn at no lower than existing configurations
minRecommendedMem = 1900 // Warn at no lower than existing configurations
minimumCPUS = 2
minimumDiskSize = 2000
autoUpdate = "auto-update-drivers"
@ -110,6 +110,12 @@ const (
network = "network"
startNamespace = "namespace"
trace = "trace"
sshIPAddress = "ssh-ip-address"
sshSSHUser = "ssh-user"
sshSSHKey = "ssh-key"
sshSSHPort = "ssh-port"
defaultSSHUser = "root"
defaultSSHPort = 22
)
var (
@ -136,7 +142,7 @@ func initMinikubeFlags() {
startCmd.Flags().String(kicBaseImage, kic.BaseImage, "The base image to use for docker/podman drivers. Intended for local development.")
startCmd.Flags().Bool(keepContext, false, "This will keep the existing kubectl context and will create a minikube context.")
startCmd.Flags().Bool(embedCerts, false, "if true, will embed the certs in kubeconfig.")
startCmd.Flags().String(containerRuntime, "docker", fmt.Sprintf("The container runtime to be used (%s).", strings.Join(cruntime.ValidRuntimes(), ", ")))
startCmd.Flags().String(containerRuntime, constants.DefaultContainerRuntime, fmt.Sprintf("The container runtime to be used (%s).", strings.Join(cruntime.ValidRuntimes(), ", ")))
startCmd.Flags().Bool(createMount, false, "This will start the mount daemon and automatically mount files into minikube.")
startCmd.Flags().String(mountString, constants.DefaultMountDir+":/minikube-host", "The argument to pass the minikube mount command on start.")
startCmd.Flags().StringSliceVar(&config.AddonList, "addons", nil, "Enable addons. see `minikube addons list` for a list of valid addon names.")
@ -152,7 +158,7 @@ func initMinikubeFlags() {
startCmd.Flags().IntP(nodes, "n", 1, "The number of nodes to spin up. Defaults to 1.")
startCmd.Flags().Bool(preload, true, "If set, download tarball of preloaded images if available to improve start time. Defaults to true.")
startCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
startCmd.Flags().Bool(forceSystemd, false, "If set, force the container runtime to use systemd as cgroup manager. Currently available for docker and crio. Defaults to false.")
startCmd.Flags().Bool(forceSystemd, false, "If set, force the container runtime to use systemd as cgroup manager. Defaults to false.")
startCmd.Flags().StringP(network, "", "", "network to run minikube with. Only available with the docker/podman drivers. If left empty, minikube will create a new network.")
startCmd.Flags().StringVarP(&outputFormat, "output", "o", "text", "Format to print stdout in. Options include: [text,json]")
startCmd.Flags().StringP(trace, "", "", "Send trace events. Options include: [gcp]")
@ -221,6 +227,12 @@ func initNetworkingFlags() {
startCmd.Flags().String(serviceCIDR, constants.DefaultServiceCIDR, "The CIDR to be used for service cluster IPs.")
startCmd.Flags().StringArrayVar(&config.DockerEnv, "docker-env", nil, "Environment variables to pass to the Docker daemon. (format: key=value)")
startCmd.Flags().StringArrayVar(&config.DockerOpt, "docker-opt", nil, "Specify arbitrary flags to pass to the Docker daemon. (format: key=value)")
// ssh
startCmd.Flags().String(sshIPAddress, "", "IP address (ssh driver only)")
startCmd.Flags().String(sshSSHUser, defaultSSHUser, "SSH user (ssh driver only)")
startCmd.Flags().String(sshSSHKey, "", "SSH key (ssh driver only)")
startCmd.Flags().Int(sshSSHPort, defaultSSHPort, "SSH port (ssh driver only)")
}
// ClusterFlagValue returns the current cluster name based on flags
@ -335,6 +347,10 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
NatNicType: viper.GetString(natNicType),
StartHostTimeout: viper.GetDuration(waitTimeout),
ExposedPorts: viper.GetStringSlice(ports),
SSHIPAddress: viper.GetString(sshIPAddress),
SSHUser: viper.GetString(sshSSHUser),
SSHKey: viper.GetString(sshSSHKey),
SSHPort: viper.GetInt(sshSSHPort),
KubernetesConfig: config.KubernetesConfig{
KubernetesVersion: k8sVersion,
ClusterName: ClusterFlagValue(),


@ -237,7 +237,7 @@ func writeStatusesAtInterval(duration time.Duration, api libmachine.API, cc *con
statuses = append(statuses, st)
} else {
for _, n := range cc.Nodes {
machineName := driver.MachineName(*cc, n)
machineName := config.MachineName(*cc, n)
klog.Infof("checking status of %s ...", machineName)
st, err := nodeStatus(api, *cc, n)
klog.Infof("%s status: %+v", machineName, st)
@ -301,7 +301,7 @@ func exitCode(statuses []*Status) int {
// nodeStatus looks up the status of a node
func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*Status, error) {
controlPlane := n.ControlPlane
name := driver.MachineName(cc, n)
name := config.MachineName(cc, n)
st := &Status{
Name: name,


@ -28,7 +28,6 @@ import (
"github.com/spf13/viper"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/kubeconfig"
"k8s.io/minikube/pkg/minikube/localpath"
@ -62,10 +61,6 @@ func init() {
stopCmd.Flags().BoolVar(&keepActive, "keep-context-active", false, "keep the kube-context active after cluster is stopped. Defaults to false.")
stopCmd.Flags().DurationVar(&scheduledStopDuration, "schedule", 0*time.Second, "Set flag to stop cluster after a set amount of time (e.g. --schedule=5m)")
stopCmd.Flags().BoolVar(&cancelScheduledStop, "cancel-scheduled", false, "cancel any existing scheduled stop requests")
if err := stopCmd.Flags().MarkHidden("schedule"); err != nil {
klog.Info("unable to mark --schedule flag as hidden")
}
stopCmd.Flags().StringVarP(&outputFormat, "output", "o", "text", "Format to print stdout in. Options include: [text,json]")
if err := viper.GetViper().BindPFlags(stopCmd.Flags()); err != nil {
@ -138,7 +133,7 @@ func stopProfile(profile string) int {
defer api.Close()
for _, n := range cc.Nodes {
machineName := driver.MachineName(*cc, n)
machineName := config.MachineName(*cc, n)
nonexistent := stop(api, machineName)
if !nonexistent {


@ -24,9 +24,9 @@ import (
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/cluster"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/machine"
@ -71,7 +71,7 @@ var unpauseCmd = &cobra.Command{
out.Step(style.Pause, "Unpausing node {{.name}} ... ", out.V{"name": name})
machineName := driver.MachineName(*co.Config, n)
machineName := config.MachineName(*co.Config, n)
host, err := machine.LoadHost(co.API, machineName)
if err != nil {
exit.Error(reason.GuestLoadHost, "Error getting host", err)


@ -35,8 +35,9 @@ var updateContextCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
cname := ClusterFlagValue()
co := mustload.Running(cname)
// cluster extension metadata for kubeconfig
updated, err := kubeconfig.UpdateEndpoint(cname, co.CP.Hostname, co.CP.Port, kubeconfig.PathFromEnv())
updated, err := kubeconfig.UpdateEndpoint(cname, co.CP.Hostname, co.CP.Port, kubeconfig.PathFromEnv(), kubeconfig.NewExtension())
if err != nil {
exit.Error(reason.HostKubeconfigUpdate, "update config", err)
}


@ -180,10 +180,10 @@ spec:
containers:
- name: ambassador-operator
# Replace this with the built image name
image: {{default "quay.io/datawire" .ImageRepository}}/ambassador-operator:v1.2.3
image: {{.CustomRegistries.AmbassadorOperator | default .ImageRepository | default .Registries.AmbassadorOperator }}{{.Images.AmbassadorOperator}}
command:
- ambassador-operator
imagePullPolicy: Always
imagePullPolicy: IfNotPresent
env:
- name: WATCH_NAMESPACE
valueFrom:
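
The addon manifest changes in this section replace hard-coded image references with a fallback chain of the form {{.CustomRegistries.X | default .ImageRepository | default .Registries.X}}{{.Images.X}}. A self-contained sketch of how such a chain resolves with Go's text/template, assuming a sprig-style "default" helper is registered on the template and using made-up data values for illustration:

package main

import (
	"os"
	"text/template"
)

// addonData mimics the fields referenced by the manifest templates; the
// concrete values used below are illustrative only.
type addonData struct {
	ImageRepository  string            // global --image-repository mirror, may be empty
	Registries       map[string]string // default registry per image
	CustomRegistries map[string]string // per-image --registries override
	Images           map[string]string // image name and tag per image
}

func main() {
	// "default" returns the fallback when the piped value is empty, which is how
	// {{.CustomRegistries.X | default .ImageRepository | default .Registries.X}} falls through.
	funcs := template.FuncMap{
		"default": func(fallback, value string) string {
			if value == "" {
				return fallback
			}
			return value
		},
	}
	tmpl := template.Must(template.New("img").Funcs(funcs).Parse(
		"image: {{.CustomRegistries.Dashboard | default .ImageRepository | default .Registries.Dashboard}}{{.Images.Dashboard}}\n"))

	data := addonData{
		Registries:       map[string]string{"Dashboard": "kubernetesui/"},
		CustomRegistries: map[string]string{},
		Images:           map[string]string{"Dashboard": "dashboard:v2.1.0"},
	}
	// With no custom registry or mirror set, this prints:
	//   image: kubernetesui/dashboard:v2.1.0
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}

With neither a custom registry nor an --image-repository mirror set, the chain falls through to the addon's default registry, so the rendered line matches the previously hard-coded value.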


@ -57,7 +57,7 @@ spec:
serviceAccountName: csi-attacher
containers:
- name: csi-attacher
image: {{default "quay.io/k8scsi" .ImageRepository}}/csi-attacher:v3.0.0-rc1
image: {{.CustomRegistries.Attacher | default .ImageRepository | default .Registries.Attacher }}{{.Images.Attacher}}
args:
- --v=5
- --csi-address=/csi/csi.sock


@ -53,7 +53,7 @@ spec:
spec:
containers:
- name: node-driver-registrar
image: {{default "quay.io/k8scsi" .ImageRepository}}/csi-node-driver-registrar:v1.3.0
image: {{.CustomRegistries.NodeDriverRegistrar | default .ImageRepository | default .Registries.NodeDriverRegistrar }}{{.Images.NodeDriverRegistrar}}
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -78,7 +78,7 @@ spec:
name: csi-data-dir
- name: hostpath
image: {{default "quay.io/k8scsi" .ImageRepository}}/hostpathplugin:v1.4.0-rc2
image: {{.CustomRegistries.HostPathPlugin | default .ImageRepository | default .Registries.HostPathPlugin }}{{.Images.HostPathPlugin}}
args:
- "--drivername=hostpath.csi.k8s.io"
- "--v=5"
@ -123,7 +123,7 @@ spec:
volumeMounts:
- mountPath: /csi
name: socket-dir
image: {{default "quay.io/k8scsi" .ImageRepository}}/livenessprobe:v1.1.0
image: {{.CustomRegistries.LivenessProbe | default .ImageRepository | default .Registries.LivenessProbe }}{{.Images.LivenessProbe}}
args:
- --csi-address=/csi/csi.sock
- --health-port=9898


@ -57,7 +57,7 @@ spec:
serviceAccountName: csi-provisioner
containers:
- name: csi-provisioner
image: {{default "gcr.io/k8s-staging-sig-storage" .ImageRepository}}/csi-provisioner:v2.0.0-rc2
image: {{.CustomRegistries.Provisioner | default .ImageRepository | default .Registries.Provisioner }}{{.Images.Provisioner}}
args:
- -v=5
- --csi-address=/csi/csi.sock


@ -57,7 +57,7 @@ spec:
serviceAccountName: csi-resizer
containers:
- name: csi-resizer
image: {{default "quay.io/k8scsi" .ImageRepository}}/csi-resizer:v0.6.0-rc1
image: {{.CustomRegistries.Resizer | default .ImageRepository | default .Registries.Resizer }}{{.Images.Resizer}}
args:
- -v=5
- -csi-address=/csi/csi.sock


@ -57,7 +57,7 @@ spec:
serviceAccount: csi-snapshotter
containers:
- name: csi-snapshotter
image: {{default "quay.io/k8scsi" .ImageRepository}}/csi-snapshotter:v2.1.0
image: {{.CustomRegistries.Snapshotter | default .ImageRepository | default .Registries.Snapshotter }}{{.Images.Snapshotter}}
args:
- -v=5
- --csi-address=/csi/csi.sock


@ -36,7 +36,7 @@ spec:
spec:
containers:
- name: dashboard-metrics-scraper
image: {{default "kubernetesui" .ImageRepository}}/metrics-scraper:v1.0.4
image: {{.CustomRegistries.MetricsScraper | default .ImageRepository | default .Registries.MetricsScraper }}{{.Images.MetricsScraper}}
ports:
- containerPort: 8000
protocol: TCP
@ -91,7 +91,7 @@ spec:
containers:
- name: kubernetes-dashboard
# WARNING: This must match pkg/minikube/bootstrapper/images/images.go
image: {{default "kubernetesui" .ImageRepository}}/dashboard:v2.1.0
image: {{.CustomRegistries.Dashboard | default .ImageRepository | default .Registries.Dashboard }}{{.Images.Dashboard}}
ports:
- containerPort: 9090
protocol: TCP


@ -34,7 +34,7 @@ spec:
spec:
containers:
- name: elasticsearch-logging
image: {{default "k8s.gcr.io" .ImageRepository}}/elasticsearch:v5.6.2
image: {{.CustomRegistries.Elasticsearch | default .ImageRepository | default .Registries.Elasticsearch }}{{.Images.Elasticsearch}}
resources:
limits:
cpu: 500m
@ -62,7 +62,7 @@ spec:
- name: ES_JAVA_OPTS
value: "-Xms1024m -Xmx1024m"
initContainers:
- image: {{default "registry.hub.docker.com/library" .ImageRepository}}/alpine:3.6
- image: {{.CustomRegistries.Alpine | default .ImageRepository | default .Registries.Alpine }}{{.Images.Alpine}}
command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
name: elasticsearch-logging-init
securityContext:


@ -31,7 +31,7 @@ spec:
spec:
containers:
- name: fluentd-es
image: {{default "k8s.gcr.io" .ImageRepository}}/fluentd-elasticsearch:v2.0.2
image: {{.CustomRegistries.FluentdElasticsearch | default .ImageRepository | default .Registries.FluentdElasticsearch }}{{.Images.FluentdElasticsearch}}
env:
- name: FLUENTD_ARGS
value: --no-supervisor -q


@ -34,7 +34,7 @@ spec:
spec:
containers:
- name: kibana-logging
image: {{default "docker.elastic.co/kibana" .ImageRepository}}/kibana:5.6.2
image: {{.CustomRegistries.Kibana | default .ImageRepository | default .Registries.Kibana }}{{.Images.Kibana}}
resources:
limits:
cpu: 500m


@ -34,7 +34,7 @@ spec:
spec:
containers:
- name: freshpod
image: {{default "gcr.io/google-samples" .ImageRepository}}/freshpod:v0.0.1
image: {{.CustomRegistries.FreshPod | default .ImageRepository | default .Registries.FreshPod }}{{.Images.FreshPod}}
imagePullPolicy: IfNotPresent
volumeMounts:
- name: docker


@ -41,7 +41,7 @@ rules:
- update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: minikube-gcp-auth-certs
@ -68,7 +68,7 @@ spec:
serviceAccountName: minikube-gcp-auth-certs
containers:
- name: create
image: {{default "jettech" .ImageRepository}}/kube-webhook-certgen:v1.3.0
image: {{.CustomRegistries.KubeWebhookCertgen | default .ImageRepository | default .Registries.KubeWebhookCertgen }}{{.Images.KubeWebhookCertgen}}
imagePullPolicy: IfNotPresent
args:
- create
@ -94,7 +94,7 @@ spec:
spec:
containers:
- name: gcp-auth
image: {{default "gcr.io/k8s-minikube" .ImageRepository}}/gcp-auth-webhook:v0.0.3
image: {{.CustomRegistries.GCPAuthWebhook | default .ImageRepository | default .Registries.GCPAuthWebhook }}{{.Images.GCPAuthWebhook}}
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8443
@ -127,7 +127,7 @@ spec:
serviceAccountName: minikube-gcp-auth-certs
containers:
- name: patch
image: {{default "jettech" .ImageRepository}}/kube-webhook-certgen:v1.3.0
image: {{.CustomRegistries.KubeWebhookCertgen | default .ImageRepository | default .Registries.KubeWebhookCertgen }}{{.Images.KubeWebhookCertgen}}
imagePullPolicy: IfNotPresent
args:
- patch


@ -50,7 +50,7 @@ spec:
hostPath:
path: /
initContainers:
- image: {{default "k8s.gcr.io" .ImageRepository}}/minikube-nvidia-driver-installer:e2d9b43228decf5d6f7dce3f0a85d390f138fa01
- image: {{.CustomRegistries.NvidiaDriverInstaller | default .ImageRepository | default .Registries.NvidiaDriverInstaller }}{{.Images.NvidiaDriverInstaller}}
name: nvidia-driver-installer
resources:
requests:
@ -72,5 +72,5 @@ spec:
- name: root-mount
mountPath: /root
containers:
- image: "{{default "k8s.gcr.io" .ImageRepository}}/pause:2.0"
- image: "{{default "k8s.gcr.io" .ImageRepository}}/{{.Images.Pause}}"
name: pause


@ -43,7 +43,7 @@ spec:
hostPath:
path: /var/lib/kubelet/device-plugins
containers:
- image: {{default "nvidia" .ImageRepository}}/k8s-device-plugin:1.0.0-beta4
- image: {{.CustomRegistries.NvidiaDevicePlugin | default .ImageRepository | default .Registries.NvidiaDevicePlugin }}{{.Images.NvidiaDevicePlugin}}
command: ["/usr/bin/nvidia-device-plugin", "-logtostderr"]
name: nvidia-gpu-device-plugin
resources:


@ -25,7 +25,7 @@ spec:
hostPID: true
containers:
- name: gvisor
image: {{default "gcr.io/k8s-minikube" .ImageRepository}}/gvisor-addon:3
image: {{.CustomRegistries.GvisorAddon | default .ImageRepository | default .Registries.GvisorAddon }}{{.Images.GvisorAddon}}
securityContext:
privileged: true
volumeMounts:


@ -46,7 +46,7 @@ spec:
value: kube-system
- name: TILLER_HISTORY_MAX
value: "0"
image: {{default "gcr.io/kubernetes-helm" .ImageRepository}}/tiller:v2.16.12
image: {{.CustomRegistries.Tiller | default .ImageRepository | default .Registries.Tiller }}{{.Images.Tiller}}
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3


@ -80,7 +80,7 @@ spec:
hostNetwork: true
containers:
- name: minikube-ingress-dns
image: {{default "cryptexlabs" .ImageRepository}}/minikube-ingress-dns:0.3.0
image: {{.CustomRegistries.IngressDNS | default .ImageRepository | default .Registries.IngressDNS }}{{.Images.IngressDNS}}
imagePullPolicy: IfNotPresent
ports:
- containerPort: 53


@ -49,7 +49,7 @@ spec:
serviceAccountName: ingress-nginx
containers:
- name: controller
image: {{default "us.gcr.io/k8s-artifacts-prod/ingress-nginx" .ImageRepository}}/controller:v0.40.2
image: {{.CustomRegistries.IngressController | default .ImageRepository | default .Registries.IngressController }}{{.Images.IngressController}}
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@ -220,7 +220,7 @@ spec:
spec:
containers:
- name: create
image: {{default "jettech" .ImageRepository}}/kube-webhook-certgen:v1.2.2
image: {{.CustomRegistries.KubeWebhookCertgenCreate | default .ImageRepository | default .Registries.KubeWebhookCertgenCreate }}{{.Images.KubeWebhookCertgenCreate}}
imagePullPolicy: IfNotPresent
args:
- create
@ -255,7 +255,7 @@ spec:
spec:
containers:
- name: patch
image: {{default "jettech" .ImageRepository}}/kube-webhook-certgen:v1.3.0
image: {{.CustomRegistries.KubeWebhookCertgenPatch | default .ImageRepository | default .Registries.KubeWebhookCertgenPatch }}{{.Images.KubeWebhookCertgenPatch}}
imagePullPolicy:
args:
- patch


@ -218,11 +218,11 @@ spec:
serviceAccountName: istio-operator
containers:
- name: istio-operator
image: docker.io/istio/operator:1.5.0
image: {{.CustomRegistries.IstioOperator | default .ImageRepository | default .Registries.IstioOperator }}{{.Images.IstioOperator}}
command:
- operator
- server
imagePullPolicy: Always
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: 200m


@ -50,7 +50,7 @@ spec:
- /bin/bash
- -c
- /kubevirt-scripts/install.sh
image: bitnami/kubectl:1.17
image: {{.CustomRegistries.Kubectl | default .ImageRepository | default .Registries.Kubectl }}{{.Images.Kubectl}}
imagePullPolicy: IfNotPresent
name: kubevirt-provisioner
lifecycle:


@ -41,8 +41,8 @@ spec:
serviceAccountName: sa-logviewer
containers:
- name: logviewer
imagePullPolicy: Always
image: {{default "docker.io/ivans3" .ImageRepository}}/minikube-log-viewer:latest
imagePullPolicy: IfNotPresent
image: {{.CustomRegistries.LogViewer | default .ImageRepository | default .Registries.LogViewer }}{{.Images.LogViewer}}
volumeMounts:
- name: logs
mountPath: /var/log/containers/

View File

@ -212,7 +212,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.hostIP
image: {{default "metallb" .ImageRepository}}/speaker:v0.8.2
image: {{.CustomRegistries.Speaker | default .ImageRepository | default .Registries.Speaker }}{{.Images.Speaker}}
imagePullPolicy: IfNotPresent
name: speaker
ports:
@ -268,7 +268,7 @@ spec:
- args:
- --port=7472
- --config=config
image: {{default "metallb" .ImageRepository}}/controller:v0.8.2
image: {{.CustomRegistries.Controller | default .ImageRepository | default .Registries.Controller }}{{.Images.Controller}}
imagePullPolicy: IfNotPresent
name: controller
ports:

View File

@ -19,8 +19,8 @@ spec:
spec:
containers:
- name: metrics-server
image: {{default "k8s.gcr.io" .ImageRepository}}/metrics-server-{{.Arch}}:v0.2.1
imagePullPolicy: Always
image: {{.CustomRegistries.MetricsServer | default .ImageRepository | default .Registries.MetricsServer }}{{.Images.MetricsServer}}
imagePullPolicy: IfNotPresent
command:
- /metrics-server
- --source=kubernetes.summary_api:https://kubernetes.default?kubeletHttps=true&kubeletPort=10250&insecure=true

View File

@ -82,7 +82,7 @@ spec:
- $(OPERATOR_NAMESPACE)
- -writeStatusName
- ""
image: quay.io/operator-framework/olm@sha256:0d15ffb5d10a176ef6e831d7865f98d51255ea5b0d16403618c94a004d049373
image: {{.CustomRegistries.OLM | default .ImageRepository | default .Registries.OLM }}{{.Images.OLM}}
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8080
@ -143,7 +143,7 @@ spec:
- '-namespace'
- olm
- -configmapServerImage=quay.io/operator-framework/configmap-operator-registry:latest
image: quay.io/operator-framework/olm@sha256:0d15ffb5d10a176ef6e831d7865f98d51255ea5b0d16403618c94a004d049373
image: {{.CustomRegistries.OLM | default .ImageRepository | default .Registries.OLM }}{{.Images.OLM}}
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8080
@ -307,8 +307,8 @@ spec:
- "5443"
- --global-namespace
- olm
image: quay.io/operator-framework/olm@sha256:0d15ffb5d10a176ef6e831d7865f98d51255ea5b0d16403618c94a004d049373
imagePullPolicy: Always
image: {{.CustomRegistries.OLM | default .ImageRepository | default .Registries.OLM }}{{.Images.OLM}}
imagePullPolicy: IfNotPresent
ports:
- containerPort: 5443
livenessProbe:
@ -346,6 +346,6 @@ metadata:
namespace: olm
spec:
sourceType: grpc
image: quay.io/operator-framework/upstream-community-operators:latest
image: {{.CustomRegistries.UpstreamCommunityOperators | default .ImageRepository | default .Registries.UpstreamCommunityOperators }}{{.Images.UpstreamCommunityOperators}}
displayName: Community Operators
publisher: OperatorHub.io

View File

@ -17,7 +17,7 @@ spec:
spec:
initContainers:
- name: update
image: {{default "registry.hub.docker.com/library" .ImageRepository}}/alpine:3.11
image: {{.CustomRegistries.Alpine | default .ImageRepository | default .Registries.Alpine }}{{.Images.Alpine}}
volumeMounts:
- name: etchosts
mountPath: /host-etc/hosts
@ -43,7 +43,7 @@ spec:
echo "Done."
containers:
- name: pause-for-update
image: {{default "gcr.io/google_containers" .ImageRepository}}/pause-amd64:3.1
image: {{.CustomRegistries.Pause | default .ImageRepository | default .Registries.Pause }}{{.Images.Pause}}
terminationGracePeriodSeconds: 30
volumes:
- name: etchosts

View File

@ -15,7 +15,7 @@ spec:
path: /var/lib/minikube/binaries
containers:
- name: core-dns-patcher
image: {{default "quay.io/rhdevelopers" .ImageRepository}}/core-dns-patcher
image: {{default "quay.io" .ImageRepository}}/{{.Images.CoreDNSPatcher}}
imagePullPolicy: IfNotPresent
# using the kubectl from the minikube instance
volumeMounts:

View File

@ -18,9 +18,9 @@ spec:
addonmanager.kubernetes.io/mode: Reconcile
spec:
containers:
- image: {{default "upmcenterprises" .ImageRepository}}/registry-creds:1.10
- image: {{.CustomRegistries.RegistryCreds | default .ImageRepository | default .Registries.RegistryCreds }}{{.Images.RegistryCreds}}
name: registry-creds
imagePullPolicy: Always
imagePullPolicy: IfNotPresent
env:
- name: AWS_ACCESS_KEY_ID
valueFrom:

View File

@ -19,7 +19,7 @@ spec:
addonmanager.kubernetes.io/mode: Reconcile
spec:
containers:
- image: {{default "gcr.io/google_containers" .ImageRepository}}/kube-registry-proxy:0.4
- image: {{.CustomRegistries.KubeRegistryProxy | default .ImageRepository | default .Registries.KubeRegistryProxy }}{{.Images.KubeRegistryProxy}}
imagePullPolicy: IfNotPresent
name: registry-proxy
ports:

View File

@ -18,7 +18,7 @@ spec:
addonmanager.kubernetes.io/mode: Reconcile
spec:
containers:
- image: {{default "registry.hub.docker.com/library" .ImageRepository}}/registry:2.7.1
- image: {{.CustomRegistries.Registry | default .ImageRepository | default .Registries.Registry }}{{.Images.Registry}}
imagePullPolicy: IfNotPresent
name: registry
ports:

View File

@ -31,7 +31,7 @@ spec:
# kubernetes.io/hostname: minikube
hostNetwork: true
containers:
- image: {{default "quay.io/nixpanic" .ImageRepository}}/glusterfs-server:pr_fake-disk
- image: {{.CustomRegistries.GlusterfsServer | default .ImageRepository | default .Registries.GlusterfsServer }}{{.Images.GlusterfsServer}}
imagePullPolicy: IfNotPresent
name: glusterfs
env:

View File

@ -116,7 +116,7 @@ spec:
spec:
serviceAccountName: heketi-service-account
containers:
- image: {{default "heketi" .ImageRepository}}/heketi:latest
- image: {{.CustomRegistries.Heketi | default .ImageRepository | default .Registries.Heketi }}{{.Images.Heketi}}
imagePullPolicy: IfNotPresent
name: heketi
env:

View File

@ -106,8 +106,8 @@ spec:
serviceAccountName: glusterfile-provisioner
containers:
- name: glusterfile-provisioner
image: {{default "gluster" .ImageRepository}}/glusterfile-provisioner:latest
imagePullPolicy: Always
image: {{.CustomRegistries.GlusterfileProvisioner | default .ImageRepository | default .Registries.GlusterfileProvisioner }}{{.Images.GlusterfileProvisioner}}
imagePullPolicy: IfNotPresent
env:
- name: PROVISIONER_NAME
value: gluster.org/glusterfile

View File

@ -100,7 +100,7 @@ spec:
hostNetwork: true
containers:
- name: storage-provisioner
image: {{default "gcr.io/k8s-minikube" .ImageRepository}}/storage-provisioner:{{.StorageProvisionerVersion}}
image: {{.CustomRegistries.StorageProvisioner | default .ImageRepository | default .Registries.StorageProvisioner }}{{.Images.StorageProvisioner}}
command: ["/storage-provisioner"]
imagePullPolicy: IfNotPresent
volumeMounts:

View File

@ -23,7 +23,7 @@ spec:
containers:
- name: volume-snapshot-controller
# TODO(xyang): Replace with an official image when it is released
image: {{default "gcr.io/k8s-staging-csi" .ImageRepository}}/snapshot-controller:v2.0.0-rc2
image: {{.CustomRegistries.SnapshotController | default .ImageRepository | default .Registries.SnapshotController }}{{.Images.SnapshotController}}
args:
- "--v=5"
imagePullPolicy: Always
imagePullPolicy: IfNotPresent

View File

@ -19,3 +19,5 @@ sha256 25dc558fbabc987bd58c7eab5230121b258a7b0eb34a49dc6595f1c6f3969116 v1.18.2.
sha256 d5c6442e3990938badc966cdd1eb9ebe2fc11345452c233aa0d87ca38fbeed81 v1.18.3.tar.gz
sha256 74a4e916acddc6cf47ab5752bdebb6732ce2c028505ef57b7edc21d2da9039b6 v1.18.4.tar.gz
sha256 fc8a8e61375e3ce30563eeb0fd6534c4f48fc20300a72e6ff51cc99cb2703516 v1.19.0.tar.gz
sha256 6165c5b8212ea03be2a465403177318bfe25a54c3e8d66d720344643913a0223 v1.19.1.tar.gz
sha256 76fd7543bc92d4364a11060f43a5131893a76c6e6e9d6de3a6bb6292c110b631 v1.20.0.tar.gz

View File

@ -4,8 +4,8 @@
#
################################################################################
CRIO_BIN_VERSION = v1.19.0
CRIO_BIN_COMMIT = 99c925bebdd9e392f2d575e25f2e6a1082e6c232
CRIO_BIN_VERSION = v1.20.0
CRIO_BIN_COMMIT = d388528dbed26b93c5bc1c89623607a1e597aa57
CRIO_BIN_SITE = https://github.com/cri-o/cri-o/archive
CRIO_BIN_SOURCE = $(CRIO_BIN_VERSION).tar.gz
CRIO_BIN_DEPENDENCIES = host-go libgpgme

View File

@ -29,6 +29,7 @@ storage_driver = "overlay"
# List to pass options to the storage driver. Please refer to
# containers-storage.conf(5) to see all available storage options.
#storage_option = [
# "overlay.mountopt=nodev,metacopy=on",
#]
# The default log directory where all logs will go unless directly specified by
@ -92,11 +93,6 @@ grpc_max_recv_msg_size = 16777216
#default_ulimits = [
#]
# default_runtime is the _name_ of the OCI runtime to be used as the default.
# The name is matched against the runtimes map below. If this value is changed,
# the corresponding existing entry from the runtimes map below will be ignored.
default_runtime = "runc"
# If true, the runtime will not use pivot_root, but instead use MS_MOVE.
no_pivot = false
@ -131,6 +127,12 @@ selinux = false
# will be used. This option supports live configuration reload.
seccomp_profile = ""
# Changes the meaning of an empty seccomp profile. By default
# (and according to CRI spec), an empty profile means unconfined.
# This option tells CRI-O to treat an empty profile as the default profile,
# which might increase security.
seccomp_use_default_when_empty = false
# Used to change the name of the default AppArmor profile of CRI-O. The default
# profile name is "crio-default". This profile only takes effect if the user
# does not specify a profile via the Kubernetes Pod's metadata annotation. If
@ -141,6 +143,9 @@ apparmor_profile = "crio-default"
# Cgroup management implementation used for the runtime.
cgroup_manager = "systemd"
# Specify whether the image pull must be performed in a separate cgroup.
separate_pull_cgroup = ""
# List of default capabilities for containers. If it is empty or commented out,
# only the capabilities defined in the containers json file by the user/kube
# will be added.
@ -174,11 +179,6 @@ hooks_dir = [
"/usr/share/containers/oci/hooks.d",
]
# List of default mounts for each container. **Deprecated:** this option will
# be removed in future versions in favor of default_mounts_file.
default_mounts = [
]
# Path to the file specifying the defaults mounts for each container. The
# format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads
# its default mounts from the following two files:
@ -243,7 +243,8 @@ gid_mappings = ""
ctr_stop_timeout = 30
# manage_ns_lifecycle determines whether we pin and remove namespaces
# and manage their lifecycle
# and manage their lifecycle.
# This option is being deprecated, and will be unconditionally true in the future.
manage_ns_lifecycle = true
# drop_infra_ctr determines whether CRI-O drops the infra container
@ -259,6 +260,11 @@ namespaces_dir = "/var/run"
# pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle
pinns_path = "/usr/bin/pinns"
# default_runtime is the _name_ of the OCI runtime to be used as the default.
# The name is matched against the runtimes map below. If this value is changed,
# the corresponding existing entry from the runtimes map below will be ignored.
default_runtime = "runc"
# The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes.
# The runtime to use is picked based on the runtime_handler provided by the CRI.
# If no runtime_handler is provided, the runtime will be picked based on the level
@ -268,7 +274,8 @@ pinns_path = "/usr/bin/pinns"
# runtime_path = "/path/to/the/executable"
# runtime_type = "oci"
# runtime_root = "/path/to/the/root"
#
# privileged_without_host_devices = false
# allowed_annotations = []
# Where:
# - runtime-handler: name used to identify the runtime
# - runtime_path (optional, string): absolute path to the runtime executable in
@ -279,6 +286,14 @@ pinns_path = "/usr/bin/pinns"
# omitted, an "oci" runtime is assumed.
# - runtime_root (optional, string): root directory for storage of containers
# state.
# - privileged_without_host_devices (optional, bool): an option for restricting
# host devices from being passed to privileged containers.
# - allowed_annotations (optional, array of strings): an option for specifying
# a list of experimental annotations that this runtime handler is allowed to process.
# The currently recognized values are:
# "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod.
# "io.kubernetes.cri-o.Devices" for configuring devices for the pod.
# "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm.
[crio.runtime.runtimes.runc]
@ -287,6 +302,8 @@ runtime_type = "oci"
runtime_root = "/run/runc"
# crun is a fast and lightweight fully featured OCI runtime and C library for
# running containers
#[crio.runtime.runtimes.crun]

View File

@ -29,6 +29,7 @@
# List to pass options to the storage driver. Please refer to
# containers-storage.conf(5) to see all available storage options.
#storage_option = [
# "overlay.mountopt=nodev,metacopy=on",
#]
# The default log directory where all logs will go unless directly specified by
@ -92,11 +93,6 @@ grpc_max_recv_msg_size = 16777216
#default_ulimits = [
#]
# default_runtime is the _name_ of the OCI runtime to be used as the default.
# The name is matched against the runtimes map below. If this value is changed,
# the corresponding existing entry from the runtimes map below will be ignored.
default_runtime = "runc"
# If true, the runtime will not use pivot_root, but instead use MS_MOVE.
no_pivot = false
@ -131,6 +127,12 @@ selinux = false
# will be used. This option supports live configuration reload.
seccomp_profile = ""
# Changes the meaning of an empty seccomp profile. By default
# (and according to CRI spec), an empty profile means unconfined.
# This option tells CRI-O to treat an empty profile as the default profile,
# which might increase security.
seccomp_use_default_when_empty = false
# Used to change the name of the default AppArmor profile of CRI-O. The default
# profile name is "crio-default". This profile only takes effect if the user
# does not specify a profile via the Kubernetes Pod's metadata annotation. If
@ -141,6 +143,9 @@ apparmor_profile = "crio-default"
# Cgroup management implementation used for the runtime.
cgroup_manager = "systemd"
# Specify whether the image pull must be performed in a separate cgroup.
separate_pull_cgroup = ""
# List of default capabilities for containers. If it is empty or commented out,
# only the capabilities defined in the containers json file by the user/kube
# will be added.
@ -174,11 +179,6 @@ hooks_dir = [
"/usr/share/containers/oci/hooks.d",
]
# List of default mounts for each container. **Deprecated:** this option will
# be removed in future versions in favor of default_mounts_file.
default_mounts = [
]
# Path to the file specifying the defaults mounts for each container. The
# format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads
# its default mounts from the following two files:
@ -243,7 +243,8 @@ gid_mappings = ""
ctr_stop_timeout = 30
# manage_ns_lifecycle determines whether we pin and remove namespaces
# and manage their lifecycle
# and manage their lifecycle.
# This option is being deprecated, and will be unconditionally true in the future.
manage_ns_lifecycle = true
# drop_infra_ctr determines whether CRI-O drops the infra container
@ -259,6 +260,11 @@ namespaces_dir = "/var/run"
# pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle
pinns_path = ""
# default_runtime is the _name_ of the OCI runtime to be used as the default.
# The name is matched against the runtimes map below. If this value is changed,
# the corresponding existing entry from the runtimes map below will be ignored.
default_runtime = "runc"
# The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes.
# The runtime to use is picked based on the runtime_handler provided by the CRI.
# If no runtime_handler is provided, the runtime will be picked based on the level
@ -268,7 +274,8 @@ pinns_path = ""
# runtime_path = "/path/to/the/executable"
# runtime_type = "oci"
# runtime_root = "/path/to/the/root"
#
# privileged_without_host_devices = false
# allowed_annotations = []
# Where:
# - runtime-handler: name used to identify the runtime
# - runtime_path (optional, string): absolute path to the runtime executable in
@ -279,6 +286,14 @@ pinns_path = ""
# omitted, an "oci" runtime is assumed.
# - runtime_root (optional, string): root directory for storage of containers
# state.
# - privileged_without_host_devices (optional, bool): an option for restricting
# host devices from being passed to privileged containers.
# - allowed_annotations (optional, array of strings): an option for specifying
# a list of experimental annotations that this runtime handler is allowed to process.
# The currently recognized values are:
# "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod.
# "io.kubernetes.cri-o.Devices" for configuring devices for the pod.
# "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm.
[crio.runtime.runtimes.runc]
@ -287,6 +302,8 @@ runtime_type = "oci"
runtime_root = "/run/runc"
# crun is a fast and lightweight fully featured OCI runtime and C library for
# running containers
#[crio.runtime.runtimes.crun]

View File

@ -25,3 +25,5 @@ sha256 ddb13aff1fcdcceb710bf71a210169b9c1abfd7420eeaf42cf7975f8fae2fcc8 docker-
sha256 9f1ec28e357a8f18e9561129239caf9c0807d74756e21cc63637c7fdeaafe847 docker-19.03.14.tgz
sha256 02936a3585f12f13b21b95e02ae722d74eaf1870b536997e914659ee307b2ac4 docker-20.10.0.tgz
sha256 8790f3b94ee07ca69a9fdbd1310cbffc729af0a07e5bf9f34a79df1e13d2e50e docker-20.10.1.tgz
sha256 97017e32a8ecbdd1826bb3c7b1424303ee0dea3f900d33591b1df5e394ed4eed docker-20.10.2.tgz
sha256 47065a47f0692cd5af03073c7386fe090d9ef5ac88a7d8455a884d8e15809be5 docker-20.10.3.tgz

View File

@ -4,7 +4,7 @@
#
################################################################################
DOCKER_BIN_VERSION = 20.10.1
DOCKER_BIN_VERSION = 20.10.3
DOCKER_BIN_SITE = https://download.docker.com/linux/static/stable/x86_64
DOCKER_BIN_SOURCE = docker-$(DOCKER_BIN_VERSION).tgz

View File

@ -5,10 +5,7 @@
################################################################################
GLUSTER_VERSION = 4.1.5
# Official gluster site has SSL problems
# https://bugzilla.redhat.com/show_bug.cgi?id=1572944
# GLUSTER_SITE = https://download.gluster.org/pub/gluster/glusterfs/4.1/$(GLUSTER_VERSION)
GLUSTER_SITE = http://download.openpkg.org/components/cache/glusterfs
GLUSTER_SITE = https://download.gluster.org/pub/gluster/glusterfs/01.old-releases/4.1/$(GLUSTER_VERSION)
GLUSTER_SOURCE = glusterfs-$(GLUSTER_VERSION).tar.gz
GLUSTER_CONF_OPTS = --disable-tiering --disable-ec-dynamic --disable-xmltest --disable-crypt-xlator --disable-georeplication --disable-ibverbs --disable-glupy --disable-gnfs --disable-cmocka --without-server
GLUSTER_INSTALL_TARGET_OPTS = DESTDIR=$(TARGET_DIR) install

View File

@ -1,4 +1,4 @@
sha256 a16846fe076aaf2c9ea2e854c3baba9fb838d916be7fb4b5be332e6c92d907d4 v1.9.3.tar.gz
sha256 5ebaa6e0dbd7fd1863f70d2bc71dc8a94e195c3339c17e3cac4560c9ec5747f8 v2.1.1.tar.gz
sha256 ec5473e51fa28f29af323473fc484f742dc7df23d06d8ba9f217f13382893a71 v2.2.0.tar.gz
sha256 bd86b181251e2308cb52f18410fb52d89df7f130cecf0298bbf9a848fe7daf60 v2.2.1.tar.gz
sha256 3212bad60d945c1169b27da03959f36d92d1d8964645c701a5a82a89118e96d1 v2.2.1.tar.gz

View File

@ -126,7 +126,7 @@ RUN sh -c "echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/li
clean-install containers-common catatonit conmon containernetworking-plugins cri-tools podman-plugins
# install cri-o based on https://github.com/cri-o/cri-o/blob/release-1.19/README.md#installing-cri-o
RUN sh -c "echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/1.19/xUbuntu_20.04/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:1.18.list" && \
RUN sh -c "echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/1.19/xUbuntu_20.04/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:1.19.list" && \
curl -LO https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/1.19/xUbuntu_20.04/Release.key && \
apt-key add - < Release.key && \
clean-install cri-o cri-o-runc
@ -187,5 +187,5 @@ RUN mkdir -p /kind
RUN rm -rf \
/usr/share/doc/* \
/usr/share/man/* \
/usr/share/local/* \
/usr/share/local/*
RUN echo "kic! Build: ${COMMIT_SHA} Time :$(date)" > "/kic.txt"

View File

@ -283,5 +283,7 @@ fix_product_uuid
select_iptables
enable_network_magic
echo "entrypoint completed: $(uname -a)"
# we want the command (expected to be systemd) to be PID1, so exec to it
exec "$@"

View File

@ -1,4 +1,20 @@
[
{
"name": "v1.17.1",
"checksums": {
"darwin": "c9361152a0a4aaed23d212b792f1907454f5fdd950f0cf9ac65c789744acf5ac",
"linux": "03a6d6cccecb7a33a09afc6dae40d8d76ccfe168aa4aba1a18c1f45bbab120c2",
"windows": "5e1d57379aa729b0a9247d5be6617906ebb7e934105df06eb6b24dda08899d3e"
}
},
{
"name": "v1.17.0",
"checksums": {
"darwin": "ad2b4de4b3f8863c2cfa9f5072cdc787141b0587fb9855dd645242253489fab3",
"linux": "e312901e12c347d0e4eec74d94b8d75512943eb62479b441bb1332f05cde0d09",
"windows": "dcae6ee972a49c4389d5e3ea81039b826cda55fefbe23b9273eeb46514abe244"
}
},
{
"name": "v1.16.0",
"checksums": {

View File
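
The new v1.17.0 and v1.17.1 entries above follow the existing schema: a release name plus per-OS sha256 checksums. A minimal sketch of decoding such an entry and checking a downloaded file against it follows; the struct and the local file name are assumptions for illustration, not minikube's actual download code.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"os"
)

// Release mirrors the shape of the entries shown in the diff:
// a tag name plus per-OS sha256 checksums (field names assumed).
type Release struct {
	Name      string            `json:"name"`
	Checksums map[string]string `json:"checksums"`
}

// verify returns nil when the file's sha256 matches the expected hex digest.
func verify(path, expected string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	sum := sha256.Sum256(data)
	if got := hex.EncodeToString(sum[:]); got != expected {
		return fmt.Errorf("checksum mismatch: got %s, want %s", got, expected)
	}
	return nil
}

func main() {
	// The v1.17.1 entry as it appears in the diff above.
	raw := []byte(`[{"name":"v1.17.1","checksums":{
		"darwin":"c9361152a0a4aaed23d212b792f1907454f5fdd950f0cf9ac65c789744acf5ac",
		"linux":"03a6d6cccecb7a33a09afc6dae40d8d76ccfe168aa4aba1a18c1f45bbab120c2",
		"windows":"5e1d57379aa729b0a9247d5be6617906ebb7e934105df06eb6b24dda08899d3e"}}]`)

	var releases []Release
	if err := json.Unmarshal(raw, &releases); err != nil {
		panic(err)
	}
	// Hypothetical local file; replace with a real download path.
	err := verify("minikube-linux-amd64", releases[0].Checksums["linux"])
	fmt.Println("v1.17.1 linux checksum ok:", err == nil)
}
```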

@ -0,0 +1,45 @@
# CRI: Containerd by default
* First proposed: 2020-11-08
* Authors: Anders F Björklund (@afbjorklund)
## Reviewer Priorities
Please review this proposal with the following priorities:
* Does this fit with minikube's [principles](https://minikube.sigs.k8s.io/docs/concepts/principles/)?
* Are there other approaches to consider?
* Could the implementation be made simpler?
* Are there usability, reliability, or technical debt concerns?
Please leave the above text in your proposal as instructions to the reader.
## Summary
Change the default container runtime from the current "docker" to "containerd".
## Goals
* Change from docker to containerd as default
* Still allow fast-building images with minikube
## Non-Goals
* Remove the docker support from minikube
* Change anything in the docker driver
## Design Details
The containerd container runtime is already included, and is passing certification.
Unlike [Docker](https://www.docker.com/products/docker-engine), containerd will need the CRI (runtime) and CNI (network) components to be included by default.
Use [BuildKit](https://github.com/moby/buildkit) as a complement to [Containerd](https://containerd.io/) for producing images from a Dockerfile.
Run buildkitd only on demand (i.e. when building); by default, run only containerd.
## Alternatives Considered
Keep Docker as the default, and add the new CRI-Docker to replace the old dockershim.
Use [CRI-O](https://cri-o.io/)/[Podman](https://podman.io/) as the default, which would be a bigger change (since dockerd itself uses containerd).

View File

@ -0,0 +1,41 @@
# Standard Linux Distribution
* First proposed: 2020-12-17
* Authors: Anders F Björklund (@afbjorklund)
## Reviewer Priorities
Please review this proposal with the following priorities:
* Does this fit with minikube's [principles](https://minikube.sigs.k8s.io/docs/concepts/principles/)?
* Are there other approaches to consider?
* Could the implementation be made simpler?
* Are there usability, reliability, or technical debt concerns?
Please leave the above text in your proposal as instructions to the reader.
## Summary
Change the distribution (OS) for the minikube ISO, from Buildroot to Ubuntu.
## Goals
* Use one of the operating systems supported by Kubernetes, such as Ubuntu 20.04
* Use the same operating system for the KIC base image and the ISO image
## Non-Goals
* Making major changes to the new standard operating system
* Support production deployments, still intended for learning
## Design Details
Use an external system image and external packages, the same as for the KIC image.
Keep both images available (one of them being the default) during a transition period.
## Alternatives Considered
Continue to support the custom distro, instead of switching to a standard distro.
Make the current Buildroot-based OS into a standard, supported Kubernetes distribution.

34
go.mod
View File

@ -8,8 +8,8 @@ require (
github.com/Azure/azure-sdk-for-go v42.3.0+incompatible
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v0.13.0
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 // indirect
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
github.com/Parallels/docker-machine-parallels/v2 v2.0.1
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
github.com/VividCortex/godaemon v0.0.0-20201030160542-15e3f4925a21
github.com/blang/semver v3.5.0+incompatible
github.com/briandowns/spinner v1.11.1
@ -20,16 +20,15 @@ require (
github.com/cloudevents/sdk-go/v2 v2.1.0
github.com/cloudfoundry-attic/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21
github.com/cloudfoundry/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21 // indirect
github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57 // indirect
github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7 // indirect
github.com/docker/cli v0.0.0-20200303162255-7d407207c304 // indirect
github.com/docker/docker v17.12.0-ce-rc1.0.20181225093023-5ddb1d410a8b+incompatible
github.com/docker/go-units v0.4.0
github.com/docker/machine v0.16.2 // v0.16.2^
github.com/docker/machine v0.16.2
github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f
github.com/elazarl/goproxy/ext v0.0.0-20190421051319-9d40249d3c2f // indirect
github.com/evanphx/json-patch v4.5.0+incompatible // indirect
github.com/go-logr/logr v0.3.0 // indirect
github.com/go-ole/go-ole v1.2.4 // indirect
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3
github.com/google/go-cmp v0.5.2
github.com/google/go-containerregistry v0.3.0
@ -38,14 +37,16 @@ require (
github.com/google/slowjam v0.0.0-20200530021616-df27e642fe7b
github.com/google/uuid v1.1.1
github.com/googleapis/gnostic v0.3.0 // indirect
github.com/hashicorp/go-getter v1.4.2
github.com/hashicorp/go-getter v1.5.1
github.com/hashicorp/go-retryablehttp v0.6.6
github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect
github.com/hooklift/assert v0.0.0-20170704181755-9d1defd6d214 // indirect
github.com/hooklift/iso9660 v0.0.0-20170318115843-1cf07e5970d8
github.com/intel-go/cpuid v0.0.0-20181003105527-1a4a6f06a1c6 // indirect
github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f
github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c
github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d // indirect
github.com/juju/fslock v0.0.0-20160525022230-4d5c94c67b4b
github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 // indirect
github.com/juju/mutex v0.0.0-20180619145857-d21b13acf4bf
github.com/juju/retry v0.0.0-20180821225755-9058e192b216 // indirect
@ -59,7 +60,7 @@ require (
github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b
github.com/moby/hyperkit v0.0.0-20171020124204-a12cd7250bcd
github.com/olekukonko/tablewriter v0.0.4
github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/go-digest v1.0.0
github.com/otiai10/copy v1.0.2
github.com/pborman/uuid v1.2.0
github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2
@ -69,7 +70,7 @@ require (
github.com/pmezard/go-difflib v1.0.0
github.com/russross/blackfriday v1.5.3-0.20200218234912-41c5fccfd6f6 // indirect
github.com/samalba/dockerclient v0.0.0-20160414174713-91d7393ff859 // indirect
github.com/shirou/gopsutil v2.18.12+incompatible
github.com/shirou/gopsutil/v3 v3.20.12
github.com/spf13/cobra v1.0.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.7.0
@ -81,21 +82,20 @@ require (
go.opentelemetry.io/otel v0.13.0
go.opentelemetry.io/otel/sdk v0.13.0
golang.org/x/build v0.0.0-20190927031335-2835ba2e683f
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6
golang.org/x/mod v0.3.0
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
golang.org/x/sys v0.0.0-20200523222454-059865788121
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3
golang.org/x/text v0.3.3
google.golang.org/api v0.29.0
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 // indirect
gopkg.in/yaml.v2 v2.3.0
gotest.tools/v3 v3.0.2 // indirect
k8s.io/api v0.17.4
k8s.io/apimachinery v0.17.4
k8s.io/client-go v0.17.4
k8s.io/klog v1.0.0
k8s.io/api v0.18.8
k8s.io/apimachinery v0.18.8
k8s.io/client-go v0.18.8
k8s.io/klog/v2 v2.4.0
k8s.io/kubectl v0.0.0
k8s.io/kubernetes v1.18.5
@ -105,11 +105,9 @@ require (
replace (
git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110319-2566ecd5d999
github.com/briandowns/spinner => github.com/alonyb/spinner v1.12.1
github.com/docker/docker => github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7
github.com/docker/machine => github.com/machine-drivers/machine v0.7.1-0.20200810185219-7d42fed1b770
github.com/briandowns/spinner => github.com/alonyb/spinner v1.12.6
github.com/docker/machine => github.com/machine-drivers/machine v0.7.1-0.20200824110434-7da9b61f0a42
github.com/google/go-containerregistry => github.com/afbjorklund/go-containerregistry v0.1.2-0.20210101161202-de47504a564f
github.com/hashicorp/go-getter => github.com/afbjorklund/go-getter v1.4.1-0.20201020145846-c0da14b4bffe
github.com/samalba/dockerclient => github.com/sayboras/dockerclient v1.0.0
k8s.io/api => k8s.io/api v0.17.3
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.17.3

473
go.sum

File diff suppressed because it is too large

View File

@ -20,12 +20,13 @@
# The script expects the following env variables:
# OS_ARCH: The operating system and the architecture separated by a hyphen '-' (e.g. darwin-amd64, linux-amd64, windows-amd64)
# VM_DRIVER: the driver to use for the test
# CONTAINER_RUNTIME: the container runtime to use for the test
# EXTRA_START_ARGS: additional flags to pass into minikube start
# EXTRA_TEST_ARGS: additional flags to pass into go test
# JOB_NAME: the name of the logfile and check name to update on github
readonly TEST_ROOT="${HOME}/minikube-integration"
readonly TEST_HOME="${TEST_ROOT}/${OS_ARCH}-${VM_DRIVER}-${MINIKUBE_LOCATION}-$$-${COMMIT}"
readonly TEST_HOME="${TEST_ROOT}/${OS_ARCH}-${VM_DRIVER}-${CONTAINER_RUNTIME}-${MINIKUBE_LOCATION}-$$-${COMMIT}"
export GOPATH="$HOME/go"
export KUBECONFIG="${TEST_HOME}/kubeconfig"
export PATH=$PATH:"/usr/local/bin/:/usr/local/go/bin/:$GOPATH/bin"
@ -52,6 +53,7 @@ echo ""
echo "arch: ${OS_ARCH}"
echo "build: ${MINIKUBE_LOCATION}"
echo "driver: ${VM_DRIVER}"
echo "runtime: ${CONTAINER_RUNTIME}"
echo "job: ${JOB_NAME}"
echo "test home: ${TEST_HOME}"
echo "sudo: ${SUDO_PREFIX}"
@ -298,6 +300,12 @@ if test -f "${TEST_OUT}"; then
rm "${TEST_OUT}" || true # clean up previous runs of same build
fi
touch "${TEST_OUT}"
if [ ! -z "${CONTAINER_RUNTIME}" ]
then
EXTRA_START_ARGS="${EXTRA_START_ARGS} --container-runtime=${CONTAINER_RUNTIME}"
fi
${SUDO_PREFIX}${E2E_BIN} \
-minikube-start-args="--driver=${VM_DRIVER} ${EXTRA_START_ARGS}" \
-test.timeout=${TIMEOUT} -test.v \
@ -352,9 +360,9 @@ fi
echo ">> Installing gopogh"
if [ "$(uname)" != "Darwin" ]; then
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-linux-amd64 && sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-linux-amd64 && sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
else
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.3.0/gopogh-darwin-amd64 && sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.4.0/gopogh-darwin-amd64 && sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
fi
echo ">> Running gopogh"
@ -385,8 +393,8 @@ if grep -q html "$HTML_OUT"; then
fi
echo ">> Cleaning up after ourselves ..."
${SUDO_PREFIX}${MINIKUBE_BIN} tunnel --cleanup || true
${SUDO_PREFIX}${MINIKUBE_BIN} delete --all --purge >/dev/null 2>/dev/null || true
timeout 3m ${SUDO_PREFIX}${MINIKUBE_BIN} tunnel --cleanup || true
timeout 5m ${SUDO_PREFIX}${MINIKUBE_BIN} delete --all --purge >/dev/null 2>/dev/null || true
cleanup_stale_routes || true
${SUDO_PREFIX} rm -Rf "${MINIKUBE_HOME}" || true

View File

@ -36,85 +36,87 @@ logger "cleanup_and_reboot is happening!"
# kill jenkins to avoid an incoming request
killall java
# clean minikube left overs
echo -e "\ncleanup minikube..."
for user in "jenkins" "root"; do
minikube="$(sudo su - ${user} -c 'command -v minikube')"
if [ ! -x "${minikube}" ]; then
minikube="/tmp/minikube"
curl -sfL https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 -o "${minikube}" && chmod +x "${minikube}" || true
fi
if [ -x "${minikube}" ]; then
if sudo su - "${user}" -c "${minikube} delete --all --purge" >/dev/null 2>&1; then
echo "successfully cleaned up minikube for ${user} user using ${minikube}"
function cleanup() {
# clean minikube left overs
echo -e "\ncleanup minikube..."
for user in "jenkins" "root"; do
minikube="$(sudo su - ${user} -c 'command -v minikube')"
if [ ! -x "${minikube}" ]; then
minikube="/tmp/minikube"
curl -sfL https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 -o "${minikube}" && chmod +x "${minikube}" || true
fi
fi
sudo killall --user "${user}" minikube >/dev/null 2>&1 || true
done
# clean docker left overs
echo -e "\ncleanup docker..."
docker kill $(docker ps -aq) >/dev/null 2>&1 || true
docker system prune --volumes --force || true
# clean KVM left overs
echo -e "\ncleanup kvm..."
overview() {
echo -e "\n - KVM domains:"
sudo virsh list --all || true
echo " - KVM pools:"
sudo virsh pool-list --all || true
echo " - KVM networks:"
sudo virsh net-list --all || true
echo " - host networks:"
sudo ip link show || true
}
echo -e "\nbefore the cleanup:"
overview
for DOM in $( sudo virsh list --all --name ); do
if sudo virsh destroy "${DOM}"; then
if sudo virsh undefine "${DOM}"; then
echo "successfully deleted KVM domain:" "${DOM}"
continue
fi
echo "unable to delete KVM domain:" "${DOM}"
fi
done
#for POOL in $( sudo virsh pool-list --all --name ); do # better, but flag '--name' is not supported for 'virsh pool-list' command on older libvirt versions
for POOL in $( sudo virsh pool-list --all | awk 'NR>2 {print $1}' ); do
for VOL in $( sudo virsh vol-list "${POOL}" ); do
if sudo virsh vol-delete --pool "${POOL}" "${VOL}"; then # flag '--delete-snapshots': "delete snapshots associated with volume (must be supported by storage driver)"
echo "successfully deleted KVM pool/volume:" "${POOL}"/"${VOL}"
continue
fi
echo "unable to delete KVM pool/volume:" "${POOL}"/"${VOL}"
done
done
for NET in $( sudo virsh net-list --all --name ); do
if [ "${NET}" != "default" ]; then
if sudo virsh net-destroy "${NET}"; then
if sudo virsh net-undefine "${NET}"; then
echo "successfully deleted KVM network" "${NET}"
continue
if [ -x "${minikube}" ]; then
if sudo su - "${user}" -c "${minikube} delete --all --purge" >/dev/null 2>&1; then
echo "successfully cleaned up minikube for ${user} user using ${minikube}"
fi
fi
echo "unable to delete KVM network" "${NET}"
fi
done
# DEFAULT_BRIDGE is a bridge connected to the 'default' KVM network
DEFAULT_BRIDGE=$( sudo virsh net-info default | awk '{ if ($1 == "Bridge:") print $2 }' )
echo "bridge connected to the 'default' KVM network to leave alone:" "${DEFAULT_BRIDGE}"
for VIF in $( sudo ip link show | awk -v defvbr="${DEFAULT_BRIDGE}.*" -F': ' '$2 !~ defvbr { if ($2 ~ /virbr.*/ || $2 ~ /vnet.*/) print $2 }' ); do
if sudo ip link delete "${VIF}"; then
echo "successfully deleted KVM interface" "${VIF}"
continue
fi
echo "unable to delete KVM interface" "${VIF}"
done
echo -e "\nafter the cleanup:"
overview
sudo killall --user "${user}" minikube >/dev/null 2>&1 || true
done
# clean docker left overs
echo -e "\ncleanup docker..."
docker kill $(docker ps -aq) >/dev/null 2>&1 || true
docker system prune --volumes --force || true
# Linux-specific cleanup
# clean KVM left overs
echo -e "\ncleanup kvm..."
overview() {
echo -e "\n - KVM domains:"
sudo virsh list --all || true
echo " - KVM pools:"
sudo virsh pool-list --all || true
echo " - KVM networks:"
sudo virsh net-list --all || true
echo " - host networks:"
sudo ip link show || true
}
echo -e "\nbefore the cleanup:"
overview
for DOM in $( sudo virsh list --all --name ); do
if sudo virsh destroy "${DOM}"; then
if sudo virsh undefine "${DOM}"; then
echo "successfully deleted KVM domain:" "${DOM}"
continue
fi
echo "unable to delete KVM domain:" "${DOM}"
fi
done
#for POOL in $( sudo virsh pool-list --all --name ); do # better, but flag '--name' is not supported for 'virsh pool-list' command on older libvirt versions
for POOL in $( sudo virsh pool-list --all | awk 'NR>2 {print $1}' ); do
for VOL in $( sudo virsh vol-list "${POOL}" ); do
if sudo virsh vol-delete --pool "${POOL}" "${VOL}"; then # flag '--delete-snapshots': "delete snapshots associated with volume (must be supported by storage driver)"
echo "successfully deleted KVM pool/volume:" "${POOL}"/"${VOL}"
continue
fi
echo "unable to delete KVM pool/volume:" "${POOL}"/"${VOL}"
done
done
for NET in $( sudo virsh net-list --all --name ); do
if [ "${NET}" != "default" ]; then
if sudo virsh net-destroy "${NET}"; then
if sudo virsh net-undefine "${NET}"; then
echo "successfully deleted KVM network" "${NET}"
continue
fi
fi
echo "unable to delete KVM network" "${NET}"
fi
done
# DEFAULT_BRIDGE is a bridge connected to the 'default' KVM network
DEFAULT_BRIDGE=$( sudo virsh net-info default | awk '{ if ($1 == "Bridge:") print $2 }' )
echo "bridge connected to the 'default' KVM network to leave alone:" "${DEFAULT_BRIDGE}"
for VIF in $( sudo ip link show | awk -v defvbr="${DEFAULT_BRIDGE}.*" -F': ' '$2 !~ defvbr { if ($2 ~ /virbr.*/ || $2 ~ /vnet.*/) print $2 }' ); do
if sudo ip link delete "${VIF}"; then
echo "successfully deleted KVM interface" "${VIF}"
continue
fi
echo "unable to delete KVM interface" "${VIF}"
done
echo -e "\nafter the cleanup:"
overview
}
# Give 15m for Linux-specific cleanup
timeout 15m cleanup
# disable localkube, kubelet
systemctl list-unit-files --state=enabled \

View File

@ -0,0 +1,76 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -x
# Make sure docker is installed and configured
./hack/jenkins/installers/check_install_docker.sh
yes|gcloud auth configure-docker
docker login -u ${DOCKERHUB_USER} -p ${DOCKERHUB_PASS}
docker login https://docker.pkg.github.com -u minikube-bot -p ${access_token}
# Setup variables
now=$(date +%s)
KV=$(egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f 2 | cut -d "-" -f 1)
GCR_REPO=gcr.io/k8s-minikube/kicbase-builds
DH_REPO=kicbase/build
GH_REPO=kicbase-build
export KIC_VERSION=$KV-$now-$ghprbPullId
GCR_IMG=${GCR_REPO}:${KIC_VERSION}
DH_IMG=${DH_REPO}:${KIC_VERSION}
GH_IMG=docker.pkg.github.com/kubernetes/minikube/${GH_REPO}:${KIC_VERSION}
export KICBASE_IMAGE_REGISTRIES="${GCR_IMG} ${DH_IMG}"
# Let's make sure we have the newest kicbase reference
curl -L https://github.com/kubernetes/minikube/raw/master/pkg/drivers/kic/types.go --output types-head.go
# kicbase tags are of the form VERSION-TIMESTAMP-PR, so this grep finds that TIMESTAMP in the middle
# if it doesn't exist, it will just return VERSION, which is covered in the if statement below
HEAD_KIC_TIMESTAMP=$(egrep "Version =" types-head.go | cut -d \" -f 2 | cut -d "-" -f 2)
CURRENT_KIC_TS=$(egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f 2 | cut -d "-" -f 2)
if [[ $HEAD_KIC_TIMESTAMP != v* ]]; then
diff=$((CURRENT_KIC_TS-HEAD_KIC_TIMESTAMP))
if [[ $CURRENT_KIC_TS == v* ]] || [ $diff -lt 0 ]; then
curl -s -H "Authorization: token ${access_token}" \
-H "Accept: application/vnd.github.v3+json" \
-X POST -d "{\"body\": \"Hi ${ghprbPullAuthorLoginMention}, your kicbase info is out of date. Please rebase.\"}" "https://api.github.com/repos/kubernetes/minikube/issues/$ghprbPullId/comments"
exit 1
fi
fi
rm types-head.go
# Build a new kicbase image
yes|make push-kic-base-image
# Abort with error message if above command failed
ec=$?
if [ $ec -gt 0 ]; then
curl -s -H "Authorization: token ${access_token}" \
-H "Accept: application/vnd.github.v3+json" \
-X POST -d "{\"body\": \"Hi ${ghprbPullAuthorLoginMention}, building a new kicbase image failed, please try again.\"}" "https://api.github.com/repos/kubernetes/minikube/issues/$ghprbPullId/comments"
exit $ec
fi
# Retrieve the sha from the new image
docker pull $GCR_IMG
fullsha=$(docker inspect --format='{{index .RepoDigests 0}}' $KICBASE_IMAGE_REGISTRIES)
sha=$(echo ${fullsha} | cut -d ":" -f 2)
# Display the message to the user
message="Hi ${ghprbPullAuthorLoginMention},\\n\\nA new kicbase image is available, please update your PR with the new tag and SHA.\\nIn pkg/drivers/kic/types.go:\\n\\n\\t// Version is the current version of kic\\n\\tVersion = \\\"${KIC_VERSION}\\\"\\n\\t// SHA of the kic base image\\n\\tbaseImageSHA = \\\"${sha}\\\"\\n\\t// The name of the GCR kicbase repository\\n\\tgcrRepo = \\\"${GCR_REPO}\\\"\\n\\t// The name of the Dockerhub kicbase repository\\n\\tdockerhubRepo = \\\"${DH_REPO}\\\"\\nThen run \`make generate-docs\` to update our documentation to reference the new image.\n\nAlternatively, run the following command and commit the changes:\\n\`\`\`\\n sed 's|Version = .*|Version = \\\"${KIC_VERSION}\\\"|;s|baseImageSHA = .*|baseImageSHA = \\\"${sha}\\\"|;s|gcrRepo = .*|gcrRepo = \\\"${GCR_REPO}\\\"|;s|dockerhubRepo = .*|dockerhubRepo = \\\"${DH_REPO}\\\"|' pkg/drivers/kic/types.go > new-types.go; mv new-types.go pkg/drivers/kic/types.go; make generate-docs;\\n\`\`\`"
curl -s -H "Authorization: token ${access_token}" \
-H "Accept: application/vnd.github.v3+json" \
-X POST -d "{\"body\": \"${message}\"}" "https://api.github.com/repos/kubernetes/minikube/issues/$ghprbPullId/comments"

View File
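
The script above builds kicbase tags as `$KV-$now-$ghprbPullId`, i.e. `VERSION-TIMESTAMP-PR`, and then compares the TIMESTAMP field from the PR's `types.go` against master's to decide whether to ask the author to rebase. A rough sketch of that tag convention and comparison follows; the tag values and the helper name are illustrative assumptions, not the project's actual code.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// splitKicTag splits a kicbase tag of the form VERSION-TIMESTAMP-PR.
// For a plain "vX.Y.Z" tag with no timestamp it returns ok=false,
// mirroring the `!= v*` check in the script above.
func splitKicTag(tag string) (version string, timestamp int64, ok bool) {
	parts := strings.Split(tag, "-")
	if len(parts) < 3 {
		return tag, 0, false
	}
	ts, err := strconv.ParseInt(parts[1], 10, 64)
	if err != nil {
		return tag, 0, false
	}
	return parts[0], ts, true
}

func main() {
	// Illustrative tags only: the PR's current tag vs. the one on master.
	current := "v0.0.17-1613000000-10426"
	head := "v0.0.17-1613100000-10500"

	_, curTS, curOK := splitKicTag(current)
	_, headTS, headOK := splitKicTag(head)

	// The PR is out of date when master already carries a newer timestamp.
	if headOK && (!curOK || curTS-headTS < 0) {
		fmt.Println("kicbase info is out of date, please rebase")
		return
	}
	fmt.Println("kicbase info is current")
}
```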

@ -28,6 +28,7 @@ set -e
OS_ARCH="linux-amd64"
VM_DRIVER="docker"
JOB_NAME="Docker_Linux"
CONTAINER_RUNTIME="docker"
mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP"

View File

@ -0,0 +1,41 @@
#!/bin/bash
# Copyright 2019 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs the integration tests on a Linux machine for the Docker Driver
# The script expects the following env variables:
# MINIKUBE_LOCATION: GIT_COMMIT from upstream build.
# COMMIT: Actual commit ID from upstream build
# EXTRA_BUILD_ARGS (optional): Extra args to be passed into the minikube integrations tests
# access_token: The Github API access token. Injected by the Jenkins credential provider.
set -e
OS_ARCH="linux-amd64"
VM_DRIVER="docker"
JOB_NAME="Docker_Linux_containerd"
CONTAINER_RUNTIME="containerd"
mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP"
# removing possible left over docker containers from previous runs
docker rm -f -v $(docker ps -aq) >/dev/null 2>&1 || true
source ./common.sh

View File

@ -0,0 +1,41 @@
#!/bin/bash
# Copyright 2019 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs the integration tests on a Linux machine for the Docker Driver
# The script expects the following env variables:
# MINIKUBE_LOCATION: GIT_COMMIT from upstream build.
# COMMIT: Actual commit ID from upstream build
# EXTRA_BUILD_ARGS (optional): Extra args to be passed into the minikube integrations tests
# access_token: The Github API access token. Injected by the Jenkins credential provider.
set -e
OS_ARCH="linux-amd64"
VM_DRIVER="docker"
JOB_NAME="Docker_Linux_crio"
CONTAINER_RUNTIME="crio"
mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP"
# removing possible left over docker containers from previous runs
docker rm -f -v $(docker ps -aq) >/dev/null 2>&1 || true
source ./common.sh

View File

@ -0,0 +1,42 @@
#!/bin/bash
# Copyright 2019 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs the integration tests on a Linux machine for the KVM Driver
# The script expects the following env variables:
# MINIKUBE_LOCATION: GIT_COMMIT from upstream build.
# COMMIT: Actual commit ID from upstream build
# EXTRA_BUILD_ARGS (optional): Extra args to be passed into the minikube integrations tests
# access_token: The Github API access token. Injected by the Jenkins credential provider.
set -e
OS_ARCH="linux-amd64"
VM_DRIVER="kvm2"
JOB_NAME="KVM_Linux_containerd"
CONTAINER_RUNTIME="containerd"
mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP"
sudo apt-get update
sudo apt-get -y install qemu-system libvirt-clients libvirt-daemon-system ebtables iptables dnsmasq
sudo adduser jenkins libvirt || true
source ./common.sh

View File

@ -0,0 +1,42 @@
#!/bin/bash
# Copyright 2019 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs the integration tests on a Linux machine for the KVM Driver
# The script expects the following env variables:
# MINIKUBE_LOCATION: GIT_COMMIT from upstream build.
# COMMIT: Actual commit ID from upstream build
# EXTRA_BUILD_ARGS (optional): Extra args to be passed into the minikube integrations tests
# access_token: The Github API access token. Injected by the Jenkins credential provider.
set -e
OS_ARCH="linux-amd64"
VM_DRIVER="kvm2"
JOB_NAME="KVM_Linux_crio"
CONTAINER_RUNTIME="crio"
mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP"
sudo apt-get update
sudo apt-get -y install qemu-system libvirt-clients libvirt-daemon-system ebtables iptables dnsmasq
sudo adduser jenkins libvirt || true
source ./common.sh

View File

@ -28,12 +28,11 @@ set -e
OS_ARCH="linux-amd64"
VM_DRIVER="podman"
JOB_NAME="Experimental_Podman_Linux"
CONTAINER_RUNTIME="containerd"
mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP"
EXTRA_START_ARGS="--container-runtime=containerd"
# remove possible left over podman containers
sudo podman rm -f -v $(sudo podman ps -aq) || true

View File

@ -36,10 +36,17 @@ declare -rx ISO_BUCKET="${bucket}/${ghprbPullId}"
declare -rx ISO_VERSION="testing"
declare -rx TAG="${ghprbActualCommit}"
declare -rx DEB_VER="$(make deb_version)"
docker kill $(docker ps -q) || true
docker rm $(docker ps -aq) || true
make -j 16 all && failed=$? || failed=$?
make -j 16 \
all \
minikube-darwin-arm64 \
out/minikube_${DEB_VER}_amd64.deb \
out/minikube_${DEB_VER}_arm64.deb \
out/docker-machine-driver-kvm2_$(make deb_version_base).deb \
&& failed=$? || failed=$?
"out/minikube-$(go env GOOS)-$(go env GOARCH)" version

View File

@ -32,17 +32,21 @@ if [ "${ghprbPullId}" == "master" ]; then
fi
jobs=(
'HyperKit_Functional_macOS'
# 'HyperKit_Functional_macOS'
'Hyper-V_Windows'
'VirtualBox_Linux'
# 'VirtualBox_macOS'
'VirtualBox_Windows'
# 'KVM-GPU_Linux' - Disabled
'KVM_Linux'
'KVM_Linux_containerd'
'KVM_Linux_crio'
'none_Linux'
'Docker_Linux'
'Docker_Linux_containerd'
'Docker_Linux_crio'
# 'Docker_macOS'
# 'Docker_Windows'
'Docker_Windows'
# 'Podman_Linux'
)

View File

@ -46,6 +46,7 @@ make verify-iso
env BUILD_IN_DOCKER=y \
make -j 16 \
all \
out/minikube-darwin-arm64 \
out/minikube-installer.exe \
"out/minikube_${DEB_VERSION}-0_amd64.deb" \
"out/minikube_${DEB_VERSION}-0_arm64.deb" \
@ -54,6 +55,11 @@ env BUILD_IN_DOCKER=y \
"out/docker-machine-driver-kvm2_${DEB_VERSION}-0_amd64.deb" \
"out/docker-machine-driver-kvm2-${RPM_VERSION}-0.x86_64.rpm"
# Don't upload temporary copies, avoid unused duplicate files in the release storage
rm -f out/minikube-linux-x86_64
rm -f out/minikube-linux-aarch64
rm -f out/minikube-windows-amd64
make checksum
# unversioned names to avoid updating upstream Kubernetes documentation each release

View File

@ -0,0 +1,35 @@
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Get-Process "*Docker Desktop*" | Stop-Process
$attempt = 1
while($attempt -le 10) {
Write-Host "Attempt ", $attempt
Write-Host "Wait for 2 minutes"
& "C:\Program Files\Docker\Docker\Docker Desktop.exe"
Start-Sleep 120
$dockerInfo = docker info
Write-Host "Docker Info ", $dockerInfo
$serverVersion = $dockerInfo | Where-Object {$_ -Match "Server Version"}
Write-Host "Server Version ", $serverVersion
if (![System.String]::IsNullOrEmpty($serverVersion)) {
Write-Host "Docker successfully started!"
exit 0
}
Write-Host "Restarting Docker Desktop"
Get-Process "*Docker Desktop*" | Stop-Process
$attempt += 1
}
exit 1

View File

@ -16,19 +16,65 @@ mkdir -p out
gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/minikube-windows-amd64.exe out/
gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/e2e-windows-amd64.exe out/
gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/testdata .
gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/setup_docker_desktop_windows.ps1 out/
./out/setup_docker_desktop_windows.ps1
If ($lastexitcode -gt 0) {
echo "Docker failed to start, exiting."
Exit $lastexitcode
}
./out/minikube-windows-amd64.exe delete --all
out/e2e-windows-amd64.exe -minikube-start-args="--driver=docker" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=65m
docker ps -aq | ForEach -Process {docker rm -fv $_}
$started=Get-Date -UFormat %s
out/e2e-windows-amd64.exe --minikube-start-args="--driver=docker" --binary=out/minikube-windows-amd64.exe --test.v --test.timeout=180m | Tee-Object -FilePath testout.txt
$env:result=$lastexitcode
# If the last exit code was 0->success, x>0->error
If($env:result -eq 0){$env:status="success"}
Else {$env:status="failure"}
If($env:result -eq 0){
$env:status="success"
echo "minikube: SUCCESS"
} Else {
$env:status="failure"
echo "minikube: FAIL"
}
# $env:SHORT_COMMIT=$env:COMMIT.substring(0, 7)
# to be used later to implement https://github.com/kubernetes/minikube/issues/6593
$env:target_url="https://storage.googleapis.com/minikube-builds/logs/$env:MINIKUBE_LOCATION/Docker_Windows.txt"
$json = "{`"state`": `"$env:status`", `"description`": `"Jenkins`", `"target_url`": `"$env:target_url`", `"context`": `"Docker_Windows`"}"
$ended=Get-Date -UFormat %s
$elapsed=$ended-$started
$elapsed=$elapsed/60
$elapsed=[math]::Round($elapsed, 2)
Get-Content testout.txt -Encoding ASCII | go tool test2json -t | Out-File -FilePath testout.json -Encoding ASCII
$gopogh_status=gopogh --in testout.json --out testout.html --name "Docker_Windows" -pr $env:MINIKUBE_LOCATION --repo github.com/kubernetes/minikube/ --details $env:COMMIT
$failures=echo $gopogh_status | jq '.NumberOfFail'
$tests=echo $gopogh_status | jq '.NumberOfTests'
$bad_status="$failures / $tests failures"
$description="$status in $elapsed minute(s)."
If($env:status -eq "failure") {
$description="completed with $bad_status in $elapsed minute(s)."
}
echo $description
$env:SHORT_COMMIT=$env:COMMIT.substring(0, 7)
$gcs_bucket="minikube-builds/logs/$env:MINIKUBE_LOCATION/$env:SHORT_COMMIT"
$env:target_url="https://storage.googleapis.com/$gcs_bucket/Docker_Windows.html"
#Upload logs to gcs
gsutil -qm cp testout.txt gs://$gcs_bucket/Docker_Windowsout.txt
gsutil -qm cp testout.json gs://$gcs_bucket/Docker_Windows.json
gsutil -qm cp testout.html gs://$gcs_bucket/Docker_Windows.html
# Update the PR with the new info
$json = "{`"state`": `"$env:status`", `"description`": `"Jenkins: $description`", `"target_url`": `"$env:target_url`", `"context`": `"Docker_Windows`"}"
Invoke-WebRequest -Uri "https://api.github.com/repos/kubernetes/minikube/statuses/$env:COMMIT`?access_token=$env:access_token" -Body $json -ContentType "application/json" -Method Post -usebasicparsing
Exit $env:result
# Just shutdown Docker, it's safer than anything else
Get-Process "*Docker Desktop*" | Stop-Process
Exit $env:result

View File
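
The Windows script above reads only two fields from gopogh's summary output, via `jq '.NumberOfFail'` and `jq '.NumberOfTests'`, to build the `"$failures / $tests failures"` description posted back to GitHub. The same parsing expressed in Go, with the struct inferred from those two queries (any other fields gopogh emits are ignored here):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Summary holds the two gopogh fields the CI script actually reads;
// the field set is inferred from the jq calls above, not gopogh's full schema.
type Summary struct {
	NumberOfFail  int `json:"NumberOfFail"`
	NumberOfTests int `json:"NumberOfTests"`
}

// describe reproduces the "completed with X / Y failures in N minute(s)."
// status text the script posts to the GitHub commit status.
func describe(raw []byte, elapsedMinutes float64) (string, error) {
	var s Summary
	if err := json.Unmarshal(raw, &s); err != nil {
		return "", err
	}
	if s.NumberOfFail == 0 {
		return fmt.Sprintf("success in %.2f minute(s).", elapsedMinutes), nil
	}
	return fmt.Sprintf("completed with %d / %d failures in %.2f minute(s).",
		s.NumberOfFail, s.NumberOfTests, elapsedMinutes), nil
}

func main() {
	// Example summary with illustrative values.
	raw := []byte(`{"NumberOfFail": 2, "NumberOfTests": 120}`)
	msg, err := describe(raw, 93.5)
	if err != nil {
		panic(err)
	}
	fmt.Println(msg) // completed with 2 / 120 failures in 93.50 minute(s).
}
```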

@ -55,6 +55,11 @@ var (
`go-version: '.*`: `go-version: '{{.StableVersion}}'`,
},
},
".github/workflows/build.yml": {
Replace: map[string]string{
`go-version: '.*`: `go-version: '{{.StableVersion}}'`,
},
},
".github/workflows/master.yml": {
Replace: map[string]string{
`go-version: '.*`: `go-version: '{{.StableVersion}}'`,

View File
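
The new `.github/workflows/build.yml` entry above reuses the `go-version` replacement already registered for the other workflows: the map key is a regular expression and the value is a template rendered with the new stable Go version. A rough sketch of how such a `Replace` map could be applied is below; the helper and the update flow are assumptions for illustration, not minikube's actual hack/update code.

```go
package main

import (
	"bytes"
	"fmt"
	"regexp"
	"text/template"
)

// applyReplace renders each replacement value with the given data and then
// applies it as a regexp substitution, mimicking the Replace maps above.
func applyReplace(src string, replace map[string]string, data interface{}) (string, error) {
	out := src
	for pattern, repl := range replace {
		tmpl, err := template.New("repl").Parse(repl)
		if err != nil {
			return "", err
		}
		var buf bytes.Buffer
		if err := tmpl.Execute(&buf, data); err != nil {
			return "", err
		}
		re, err := regexp.Compile(pattern)
		if err != nil {
			return "", err
		}
		out = re.ReplaceAllString(out, buf.String())
	}
	return out, nil
}

func main() {
	workflow := "      - uses: actions/setup-go@v2\n        with:\n          go-version: '1.15.5'\n"
	replace := map[string]string{`go-version: '.*`: `go-version: '{{.StableVersion}}'`}

	// "1.16.0" is only a placeholder for whatever stable version the updater resolves.
	updated, err := applyReplace(workflow, replace, map[string]string{"StableVersion": "1.16.0"})
	if err != nil {
		panic(err)
	}
	fmt.Print(updated) // the go-version line now reads: go-version: '1.16.0'
}
```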

@ -25,6 +25,7 @@ apiServer:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:

View File

@ -25,6 +25,7 @@ apiServer:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:

View File

@ -25,6 +25,7 @@ apiServer:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:

View File

@ -27,6 +27,7 @@ apiServer:
feature-gates: "a=b"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
feature-gates: "a=b"
kube-api-burst: "32"
leader-elect: "false"

Some files were not shown because too many files have changed in this diff