Merge branch 'master' of github.com:kubernetes/minikube into m2

commit 4af82913e1

@ -1,24 +1,21 @@
name: CI
|
||||
on: [pull_request]
|
||||
jobs:
|
||||
docker_ubuntu_16_04:
|
||||
# Runs before all other jobs
|
||||
# builds the minikube binaries
|
||||
build_minikube:
|
||||
env:
|
||||
TIME_ELAPSED: time
|
||||
JOB_NAME: "Docker_Ubuntu_16_04"
|
||||
COMMIT_STATUS: ""
|
||||
runs-on: ubuntu-16.04
|
||||
GOPOGH_RESULT: ""
|
||||
runs-on: ubuntu-18.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: build binaries
|
||||
run : |
|
||||
make minikube-linux-amd64
|
||||
make e2e-linux-amd64
|
||||
mkdir -p report
|
||||
mkdir -p testhome
|
||||
pwd
|
||||
ls -lah
|
||||
cp -r test/integration/testdata ./
|
||||
ls -lah
|
||||
cp -r test/integration/testdata ./out
|
||||
whoami
|
||||
echo github ref $GITHUB_REF
|
||||
echo workflow $GITHUB_WORKFLOW
|
||||
|
@ -26,147 +23,307 @@ jobs:
|
|||
echo event name $GITHUB_EVENT_NAME
|
||||
echo workspace $GITHUB_WORKSPACE
|
||||
echo "end of debug stuff"
|
||||
|
||||
- name: install gopogh
|
||||
echo $(which jq)
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: minikube_binaries
|
||||
path: out
|
||||
lint:
|
||||
env:
|
||||
TIME_ELAPSED: time
|
||||
JOB_NAME: "lint"
|
||||
GOPOGH_RESULT: ""
|
||||
runs-on: ubuntu-18.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: install libvirt
|
||||
run : |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y libvirt-dev
|
||||
- name: lint
|
||||
env:
|
||||
TESTSUITE: lintall
|
||||
run : make test
|
||||
continue-on-error: false
|
||||
unit_test:
|
||||
env:
|
||||
TIME_ELAPSED: time
|
||||
JOB_NAME: "unit_test"
|
||||
GOPOGH_RESULT: ""
|
||||
runs-on: ubuntu-18.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: install libvirt
|
||||
run : |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y libvirt-dev
|
||||
- name: unit test
|
||||
env:
|
||||
TESTSUITE: unittest
|
||||
run :
|
||||
make test
|
||||
continue-on-error: false
|
||||
# Run the following integration tests after the build_minikube
|
||||
# They will run in parallel and use the binaries in previous step
|
||||
docker_ubuntu_16_04:
|
||||
needs: [build_minikube]
|
||||
env:
|
||||
TIME_ELAPSED: time
|
||||
JOB_NAME: "Docker_Ubuntu_16_04"
|
||||
GOPOGH_RESULT: ""
|
||||
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
|
||||
runs-on: ubuntu-16.04
|
||||
steps:
|
||||
- name: Install gopogh
|
||||
shell: bash
|
||||
run: |
|
||||
cd /tmp
|
||||
GO111MODULE="on" go get github.com/medyagh/gopogh@v0.0.17 || true
|
||||
cd -
|
||||
- name: run integration test
|
||||
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh-linux-amd64
|
||||
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
|
||||
- name: Download binaries
|
||||
uses: actions/download-artifact@v1
|
||||
with:
|
||||
name: minikube_binaries
|
||||
- name: Run integration test
|
||||
continue-on-error: true
|
||||
# bash {0} to allow the test to continue to the next step even in case of failure
|
||||
shell: bash {0}
|
||||
run: |
|
||||
cd minikube_binaries
|
||||
mkdir -p report
|
||||
mkdir -p testhome
|
||||
chmod a+x e2e-*
|
||||
chmod a+x minikube-*
|
||||
START_TIME=$(date -u +%s)
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./out/e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -expected-default-driver= -test.timeout=70m -test.v -binary=out/minikube-linux-amd64 2>&1 | tee ./report/testout.txt
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.timeout=70m -test.v -timeout-multiplier=3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
|
||||
END_TIME=$(date -u +%s)
|
||||
TIME_ELAPSED=$(($END_TIME-$START_TIME))
|
||||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
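The run step above times the whole e2e suite with `date -u +%s` and integer shell arithmetic, then hands the formatted result to later steps through the `::set-env` workflow command. As a worked example of the same minute/second split, here is a minimal Go sketch; it is a hypothetical helper for illustration only and is not part of the workflow or the minikube repository.

```go
// elapsed.go — minimal sketch of the TIME_ELAPSED arithmetic used above
// (hypothetical helper, not part of the minikube repository).
package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	// ... run the e2e suite here ...
	elapsed := int(time.Since(start).Seconds())

	min := elapsed / 60 // same integer division as $((${TIME_ELAPSED}/60))
	sec := elapsed % 60 // same remainder as $((${TIME_ELAPSED}%60))
	fmt.Printf("TIME_ELAPSED=%d min %d seconds\n", min, sec)
}
```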
|
||||
- name: generate gopogh report
|
||||
- name: Generate html report
|
||||
shell: bash
|
||||
run: |
|
||||
cd minikube_binaries
|
||||
export PATH=${PATH}:`go env GOPATH`/bin
|
||||
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
|
||||
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name " $GITHUB_REF" -repo "${JOB_NAME} ${GITHUB_REF} ${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
|
||||
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
|
||||
echo status: ${STAT}
|
||||
COMMIT_STATUS="${JOB_NAME} : Completed with ${STAT} in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=COMMIT_STATUS::${COMMIT_STATUS}
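gopogh consumes the `go tool test2json` output and, besides the HTML report, prints a JSON summary that later steps slice with jq (`.NumberOfFail`, `.NumberOfTests`, `.FailedTests`). The Go sketch below decodes such a summary; the struct fields are assumed from the jq keys used in this workflow rather than taken from gopogh's documented API, so treat it as an illustration.

```go
// summarize.go — sketch of reading the gopogh summary captured in $STAT.
// Field names are assumed from the jq queries in this workflow
// (.NumberOfFail, .NumberOfTests, .FailedTests); verify against gopogh itself.
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type gopoghSummary struct {
	NumberOfFail  int      `json:"NumberOfFail"`
	NumberOfTests int      `json:"NumberOfTests"`
	FailedTests   []string `json:"FailedTests"`
}

func main() {
	var s gopoghSummary
	if err := json.NewDecoder(os.Stdin).Decode(&s); err != nil {
		fmt.Fprintln(os.Stderr, "decode summary:", err)
		os.Exit(1)
	}
	fmt.Printf("completed with %d / %d failures\n", s.NumberOfFail, s.NumberOfTests)
}
```

Fed with `echo "$STAT" | go run summarize.go`, this prints the same kind of line the workflow stores in GOPOGH_RESULT.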
|
||||
- name: The End Result
|
||||
run: |
|
||||
echo time elapsed is ${TIME_ELAPSED}
|
||||
echo END RESULT IS ${COMMIT_STATUS}
|
||||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: docker_ubuntu_16_04
|
||||
path: report
|
||||
path: minikube_binaries/report
|
||||
- name: The End Result Docker on ubuntu 16:04
|
||||
shell: bash
|
||||
run: |
|
||||
echo ${GOPOGH_RESULT}
|
||||
numFail=$(echo $STAT | jq '.NumberOfFail')
|
||||
echo "----------------${numFail} Failures----------------------------"
|
||||
echo $STAT | jq '.FailedTests' || true
|
||||
echo "-------------------------------------------------------"
|
||||
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
|
||||
docker_ubuntu_18_04:
|
||||
runs-on: ubuntu-18.04
|
||||
env:
|
||||
TIME_ELAPSED: time
|
||||
JOB_NAME: "Docker_Ubuntu_18_04"
|
||||
COMMIT_STATUS: ""
|
||||
runs-on: ubuntu-18.04
|
||||
GOPOGH_RESULT: ""
|
||||
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
|
||||
needs: [build_minikube]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: build binaries
|
||||
run : |
|
||||
make minikube-linux-amd64
|
||||
make e2e-linux-amd64
|
||||
- name: Install gopogh
|
||||
shell: bash
|
||||
run: |
|
||||
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh-linux-amd64
|
||||
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
|
||||
- name: Download binaries
|
||||
uses: actions/download-artifact@v1
|
||||
with:
|
||||
name: minikube_binaries
|
||||
- name: Run integration test
|
||||
continue-on-error: true
|
||||
# bash {0} to allow the test to continue to the next step even in case of failure
|
||||
shell: bash {0}
|
||||
run: |
|
||||
cd minikube_binaries
|
||||
mkdir -p report
|
||||
mkdir -p testhome
|
||||
pwd
|
||||
ls -lah
|
||||
cp -r test/integration/testdata ./
|
||||
ls -lah
|
||||
whoami
|
||||
- name: install gopogh
|
||||
run: |
|
||||
cd /tmp
|
||||
GO111MODULE="on" go get github.com/medyagh/gopogh@v0.0.17 || true
|
||||
cd -
|
||||
- name: run integration test
|
||||
run: |
|
||||
chmod a+x e2e-*
|
||||
chmod a+x minikube-*
|
||||
START_TIME=$(date -u +%s)
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./out/e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -expected-default-driver= -test.timeout=70m -test.v -binary=out/minikube-linux-amd64 2>&1 | tee ./report/testout.txt
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.timeout=70m -test.v -timeout-multiplier=3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
|
||||
END_TIME=$(date -u +%s)
|
||||
TIME_ELAPSED=$(($END_TIME-$START_TIME))
|
||||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds"
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
- name: generate gopogh report
|
||||
- name: Generate html report
|
||||
shell: bash
|
||||
run: |
|
||||
cd minikube_binaries
|
||||
export PATH=${PATH}:`go env GOPATH`/bin
|
||||
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
|
||||
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name " $GITHUB_REF" -repo "${JOB_NAME} ${GITHUB_REF} ${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
|
||||
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
|
||||
echo status: ${STAT}
|
||||
COMMIT_STATUS="${JOB_NAME} : Completed with ${STAT} in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=COMMIT_STATUS::${COMMIT_STATUS}
|
||||
- name: The End Result
|
||||
run: |
|
||||
echo time elapsed is ${TIME_ELAPSED}
|
||||
echo END RESULT IS ${COMMIT_STATUS}
|
||||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: docker_ubuntu_18_04
|
||||
path: report
|
||||
path: minikube_binaries/report
|
||||
- name: The End Result - Docker On Ubuntu 18:04
|
||||
shell: bash
|
||||
run: |
|
||||
echo ${GOPOGH_RESULT}
|
||||
numFail=$(echo $STAT | jq '.NumberOfFail')
|
||||
echo "----------------${numFail} Failures----------------------------"
|
||||
echo $STAT | jq '.FailedTests' || true
|
||||
echo "-------------------------------------------------------"
|
||||
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
|
||||
none_ubuntu16_04:
|
||||
needs: [build_minikube]
|
||||
env:
|
||||
TIME_ELAPSED: time
|
||||
JOB_NAME: "None_Ubuntu_16_04"
|
||||
COMMIT_STATUS: ""
|
||||
GOPOGH_RESULT: ""
|
||||
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
|
||||
runs-on: ubuntu-16.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: build binaries
|
||||
run : |
|
||||
make minikube-linux-amd64
|
||||
make e2e-linux-amd64
|
||||
- name: Install gopogh
|
||||
shell: bash
|
||||
run: |
|
||||
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh-linux-amd64
|
||||
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
|
||||
- name: Download binaries
|
||||
uses: actions/download-artifact@v1
|
||||
with:
|
||||
name: minikube_binaries
|
||||
- name: Run integration test
|
||||
continue-on-error: true
|
||||
# bash {0} to allow the test to continue to the next step even in case of failure
|
||||
shell: bash {0}
|
||||
run: |
|
||||
cd minikube_binaries
|
||||
mkdir -p report
|
||||
mkdir -p testhome
|
||||
pwd
|
||||
ls -lah
|
||||
cp -r test/integration/testdata ./
|
||||
ls -lah
|
||||
whoami
|
||||
- name: install gopogh
|
||||
run: |
|
||||
cd /tmp
|
||||
GO111MODULE="on" go get github.com/medyagh/gopogh@v0.0.17 || true
|
||||
cd -
|
||||
- name: run integration test
|
||||
run: |
|
||||
chmod a+x e2e-*
|
||||
chmod a+x minikube-*
|
||||
START_TIME=$(date -u +%s)
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./out/e2e-linux-amd64 -minikube-start-args=--vm-driver=none -expected-default-driver= -test.timeout=70m -test.v -binary=out/minikube-linux-amd64 2>&1 | tee ./report/testout.txt
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--vm-driver=none -test.timeout=70m -test.v -timeout-multiplier=3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
|
||||
END_TIME=$(date -u +%s)
|
||||
TIME_ELAPSED=$(($END_TIME-$START_TIME))
|
||||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds"
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
- name: generate gopogh report
|
||||
- name: Generate html report
|
||||
shell: bash
|
||||
run: |
|
||||
cd minikube_binaries
|
||||
export PATH=${PATH}:`go env GOPATH`/bin
|
||||
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
|
||||
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name " $GITHUB_REF" -repo "${JOB_NAME} ${GITHUB_REF} ${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
|
||||
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
|
||||
echo status: ${STAT}
|
||||
COMMIT_STATUS="${JOB_NAME} : Completed with ${STAT} in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=COMMIT_STATUS::${COMMIT_STATUS}
|
||||
- name: The End Result
|
||||
run: |
|
||||
echo time elapsed is ${TIME_ELAPSED}
|
||||
echo END RESULT IS ${COMMIT_STATUS}
|
||||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: none_ubuntu16_04
|
||||
path: report
|
||||
path: minikube_binaries/report
|
||||
- name: The End Result - None On Ubuntu 16:04
|
||||
shell: bash
|
||||
run: |
|
||||
echo ${GOPOGH_RESULT}
|
||||
numFail=$(echo $STAT | jq '.NumberOfFail')
|
||||
echo "----------------${numFail} Failures----------------------------"
|
||||
echo $STAT | jq '.FailedTests' || true
|
||||
echo "-------------------------------------------------------"
|
||||
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
|
||||
none_ubuntu18_04:
|
||||
needs: [build_minikube]
|
||||
env:
|
||||
TIME_ELAPSED: time
|
||||
JOB_NAME: "None_Ubuntu_18_04"
|
||||
GOPOGH_RESULT: ""
|
||||
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
|
||||
runs-on: ubuntu-18.04
|
||||
steps:
|
||||
- name: Install gopogh
|
||||
shell: bash
|
||||
run: |
|
||||
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh-linux-amd64
|
||||
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
|
||||
- name: Download binaries
|
||||
uses: actions/download-artifact@v1
|
||||
with:
|
||||
name: minikube_binaries
|
||||
- name: Run integration test
|
||||
continue-on-error: true
|
||||
# bash {0} to allow the test to continue to the next step even in case of failure
|
||||
shell: bash {0}
|
||||
run: |
|
||||
cd minikube_binaries
|
||||
mkdir -p report
|
||||
mkdir -p testhome
|
||||
chmod a+x e2e-*
|
||||
chmod a+x minikube-*
|
||||
START_TIME=$(date -u +%s)
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--vm-driver=none -test.timeout=70m -test.v -timeout-multiplier=3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
|
||||
END_TIME=$(date -u +%s)
|
||||
TIME_ELAPSED=$(($END_TIME-$START_TIME))
|
||||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
- name: Generate html report
|
||||
shell: bash
|
||||
run: |
|
||||
cd minikube_binaries
|
||||
export PATH=${PATH}:`go env GOPATH`/bin
|
||||
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
|
||||
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
|
||||
echo status: ${STAT}
|
||||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: none_ubuntu18_04
|
||||
path: minikube_binaries/report
|
||||
- name: The End Result - None on Ubuntu 18:04
|
||||
shell: bash
|
||||
run: |
|
||||
echo ${GOPOGH_RESULT}
|
||||
numFail=$(echo $STAT | jq '.NumberOfFail')
|
||||
echo "----------------${numFail} Failures----------------------------"
|
||||
echo $STAT | jq '.FailedTests' || true
|
||||
echo "-------------------------------------------------------"
|
||||
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
|
||||
podman_ubuntu_18_04:
|
||||
needs: [build_minikube]
|
||||
env:
|
||||
TIME_ELAPSED: time
|
||||
JOB_NAME: "Podman_Ubuntu_18_04"
|
||||
COMMIT_STATUS: ""
|
||||
GOPOGH_RESULT: ""
|
||||
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
|
||||
runs-on: ubuntu-18.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: install podman
|
||||
shell: bash
|
||||
run: |
|
||||
. /etc/os-release
|
||||
sudo sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list"
|
||||
|
@ -174,100 +331,116 @@ jobs:
|
|||
sudo apt-key add - < Release.key || true
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get -qq -y install podman
|
||||
- name: build binaries
|
||||
run : |
|
||||
make minikube-linux-amd64
|
||||
make e2e-linux-amd64
|
||||
sudo podman version || true
|
||||
sudo podman info || true
|
||||
- name: Install gopogh
|
||||
shell: bash
|
||||
run: |
|
||||
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh-linux-amd64
|
||||
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
|
||||
- name: Download binaries
|
||||
uses: actions/download-artifact@v1
|
||||
with:
|
||||
name: minikube_binaries
|
||||
- name: Run integration test
|
||||
continue-on-error: true
|
||||
# bash {0} to allow the test to continue to the next step even in case of failure
|
||||
shell: bash {0}
|
||||
run: |
|
||||
cd minikube_binaries
|
||||
mkdir -p report
|
||||
mkdir -p testhome
|
||||
pwd
|
||||
ls -lah
|
||||
cp -r test/integration/testdata ./
|
||||
cp -r test/integration/testdata ./
|
||||
ls -lah
|
||||
whoami
|
||||
- name: install gopogh
|
||||
run: |
|
||||
cd /tmp
|
||||
GO111MODULE="on" go get github.com/medyagh/gopogh@v0.0.17 || true
|
||||
cd -
|
||||
- name: run integration test
|
||||
run: |
|
||||
chmod a+x e2e-*
|
||||
chmod a+x minikube-*
|
||||
START_TIME=$(date -u +%s)
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./out/e2e-linux-amd64 -minikube-start-args=--vm-driver=podman -expected-default-driver= -test.timeout=70m -test.v -binary=out/minikube-linux-amd64 2>&1 | tee ./report/testout.txt
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--vm-driver=podman -test.timeout=70m -test.v -timeout-multiplier=3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
|
||||
END_TIME=$(date -u +%s)
|
||||
TIME_ELAPSED=$(($END_TIME-$START_TIME))
|
||||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds"
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
- name: generate gopogh report
|
||||
- name: Generate html report
|
||||
shell: bash
|
||||
run: |
|
||||
cd minikube_binaries
|
||||
export PATH=${PATH}:`go env GOPATH`/bin
|
||||
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
|
||||
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name " $GITHUB_REF" -repo "${JOB_NAME} ${GITHUB_REF} ${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
|
||||
STAT=$(gopogh -in ./report/testout.json -out ./report/testout.html -name "${JOB_NAME} ${GITHUB_REF}" -repo "${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
|
||||
echo status: ${STAT}
|
||||
COMMIT_STATUS="${JOB_NAME} : Completed with ${STAT} in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=COMMIT_STATUS::${COMMIT_STATUS}
|
||||
- name: The End Result
|
||||
run: |
|
||||
echo time elapsed is ${TIME_ELAPSED}
|
||||
echo END RESULT IS ${COMMIT_STATUS}
|
||||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: podman_ubuntu_18_04
|
||||
path: report
|
||||
path: minikube_binaries/report
|
||||
- name: The End Result - Podman On Ubuntu 18:04
|
||||
shell: bash
|
||||
run: |
|
||||
echo ${GOPOGH_RESULT}
|
||||
numFail=$(echo $STAT | jq '.NumberOfFail')
|
||||
echo "----------------${numFail} Failures----------------------------"
|
||||
echo $STAT | jq '.FailedTests' || true
|
||||
echo "-------------------------------------------------------"
|
||||
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
|
||||
# After all integration tests have finished
|
||||
# collect all the reports and upload
|
||||
upload_all_reports:
|
||||
needs: [docker_ubuntu_16_04,docker_ubuntu_18_04,none_ubuntu16_04,podman_ubuntu_18_04]
|
||||
if: always()
|
||||
needs: [docker_ubuntu_16_04,docker_ubuntu_18_04,none_ubuntu16_04,none_ubuntu18_04,podman_ubuntu_18_04]
|
||||
runs-on: ubuntu-18.04
|
||||
steps:
|
||||
- name: download results docker_ubuntu_16_04
|
||||
uses: actions/download-artifact@v1
|
||||
with:
|
||||
name: docker_ubuntu_16_04
|
||||
- name: see if report is there
|
||||
shell: bash
|
||||
- name: cp docker_ubuntu_16_04 to all_report
|
||||
continue-on-error: true
|
||||
shell: bash {0}
|
||||
run: |
|
||||
pwd
|
||||
ls -lah
|
||||
ls -lah docker_ubuntu_16_04
|
||||
mkdir -p all_reports
|
||||
cp -r docker_ubuntu_16_04 ./all_reports/
|
||||
- name: download results docker_ubuntu_18_04
|
||||
uses: actions/download-artifact@v1
|
||||
with:
|
||||
name: docker_ubuntu_18_04
|
||||
- name: see if report is there
|
||||
shell: bash
|
||||
- name: cp docker_ubuntu_18_04 to all_report
|
||||
continue-on-error: true
|
||||
shell: bash {0}
|
||||
run: |
|
||||
pwd
|
||||
ls -lah
|
||||
ls -lah docker_ubuntu_18_04
|
||||
mkdir -p all_reports
|
||||
cp -r docker_ubuntu_18_04 ./all_reports/
|
||||
- name: download results none_ubuntu16_04
|
||||
uses: actions/download-artifact@v1
|
||||
with:
|
||||
name: none_ubuntu16_04
|
||||
- name: see if report is there
|
||||
shell: bash
|
||||
- name: cp none_ubuntu16_04 to all_report
|
||||
continue-on-error: true
|
||||
shell: bash {0}
|
||||
run: |
|
||||
pwd
|
||||
ls -lah
|
||||
ls -lah none_ubuntu16_04
|
||||
mkdir -p all_reports
|
||||
cp -r none_ubuntu16_04 ./all_reports/
|
||||
|
||||
- name: download results none_ubuntu18_04
|
||||
uses: actions/download-artifact@v1
|
||||
with:
|
||||
name: none_ubuntu18_04
|
||||
- name: cp none_ubuntu18_04 to all_report
|
||||
continue-on-error: true
|
||||
shell: bash {0}
|
||||
run: |
|
||||
mkdir -p all_reports
|
||||
cp -r none_ubuntu18_04 ./all_reports/
|
||||
- name: download results podman_ubuntu_18_04
|
||||
uses: actions/download-artifact@v1
|
||||
with:
|
||||
name: podman_ubuntu_18_04
|
||||
- name: see if report is there
|
||||
shell: bash
|
||||
- name: cp podman_ubuntu_18_04 to all_report
|
||||
continue-on-error: true
|
||||
shell: bash {0}
|
||||
run: |
|
||||
pwd
|
||||
ls -lah
|
||||
ls -lah podman_ubuntu_18_04
|
||||
mkdir -p all_reports
|
||||
cp -r podman_ubuntu_18_04 ./all_reports/
|
||||
- uses: actions/upload-artifact@v1
|
||||
|
|
CHANGELOG.md (44 changed lines)
|
@ -1,5 +1,49 @@
|
|||
# Release Notes
|
||||
|
||||
## Version 1.7.3 - 2020-02-20
|
||||
|
||||
* Add podman driver [#6515](https://github.com/kubernetes/minikube/pull/6515)
|
||||
* Create Hyper-V External Switch [#6264](https://github.com/kubernetes/minikube/pull/6264)
|
||||
* Don't allow creating profile by profile command [#6672](https://github.com/kubernetes/minikube/pull/6672)
|
||||
* Create the Node subcommands for multi-node refactor [#6556](https://github.com/kubernetes/minikube/pull/6556)
|
||||
* Improve docker volume clean up [#6695](https://github.com/kubernetes/minikube/pull/6695)
|
||||
* Add podman-env for connecting with podman-remote [#6351](https://github.com/kubernetes/minikube/pull/6351)
|
||||
* Update gvisor addon to latest runsc version [#6573](https://github.com/kubernetes/minikube/pull/6573)
|
||||
* Fix inverted start resource logic [#6700](https://github.com/kubernetes/minikube/pull/6700)
|
||||
* Fix bug in --install-addons flag [#6696](https://github.com/kubernetes/minikube/pull/6696)
|
||||
* Fix bug in docker-env and add tests for docker-env command [#6604](https://github.com/kubernetes/minikube/pull/6604)
|
||||
* Fix kubeConfigPath [#6568](https://github.com/kubernetes/minikube/pull/6568)
|
||||
* Fix `minikube start` in order to be able to start VM even if machine does not exist [#5730](https://github.com/kubernetes/minikube/pull/5730)
|
||||
* Fail fast if waiting for SSH to be available [#6625](https://github.com/kubernetes/minikube/pull/6625)
|
||||
* Add RPFilter to ISO kernel - required for modern Calico releases [#6690](https://github.com/kubernetes/minikube/pull/6690)
|
||||
* Update Kubernetes default version to v1.17.3 [#6602](https://github.com/kubernetes/minikube/pull/6602)
|
||||
* Update crictl to v1.17.0 [#6667](https://github.com/kubernetes/minikube/pull/6667)
|
||||
* Add conntrack-tools, needed for kubernetes 1.18 [#6626](https://github.com/kubernetes/minikube/pull/6626)
|
||||
* Stopped and running machines should count as existing [#6629](https://github.com/kubernetes/minikube/pull/6629)
|
||||
* Upgrade Docker to 19.03.6 [#6618](https://github.com/kubernetes/minikube/pull/6618)
|
||||
* Upgrade conmon version for podman [#6622](https://github.com/kubernetes/minikube/pull/6622)
|
||||
* Upgrade podman to 1.6.5 [#6623](https://github.com/kubernetes/minikube/pull/6623)
|
||||
* Update helm-tiller addon image v2.14.3 → v2.16.1 [#6575](https://github.com/kubernetes/minikube/pull/6575)
|
||||
|
||||
Thank you to our wonderful and amazing contributors who contributed to this bug-fix release:
|
||||
|
||||
- Anders F Björklund
|
||||
- Nguyen Hai Truong
|
||||
- Martynas Pumputis
|
||||
- Thomas Strömberg
|
||||
- Medya Ghazizadeh
|
||||
- Wietse Muizelaar
|
||||
- Zhongcheng Lao
|
||||
- Sharif Elgamal
|
||||
- Priya Wadhwa
|
||||
- Rohan Maity
|
||||
- anencore94
|
||||
- aallbright
|
||||
- Tam Mach
|
||||
- edge0701
|
||||
- go_vargo
|
||||
- sayboras
|
||||
|
||||
## Version 1.7.2 - 2020-02-07
|
||||
|
||||
* Fix to delete context when delete minikube [#6541](https://github.com/kubernetes/minikube/pull/6541)
|
||||
|
|
Makefile (26 changed lines)
|
@ -15,24 +15,24 @@
|
|||
# Bump these on release - and please check ISO_VERSION for correctness.
|
||||
VERSION_MAJOR ?= 1
|
||||
VERSION_MINOR ?= 7
|
||||
VERSION_BUILD ?= 2
|
||||
VERSION_BUILD ?= 3
|
||||
RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).${VERSION_BUILD}
|
||||
VERSION ?= v$(RAW_VERSION)
|
||||
|
||||
KUBERNETES_VERSION ?= $(shell egrep "^var DefaultKubernetesVersion" pkg/minikube/constants/constants.go | cut -d \" -f2)
|
||||
KIC_VERSION ?= $(shell egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f2)
|
||||
|
||||
# Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions
|
||||
ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).0
|
||||
ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).3
|
||||
# Dashes are valid in semver, but not Linux packaging. Use ~ to delimit alpha/beta
|
||||
DEB_VERSION ?= $(subst -,~,$(RAW_VERSION))
|
||||
RPM_VERSION ?= $(DEB_VERSION)
|
||||
KIC_VERSION ?= 0.0.5
|
||||
|
||||
# used by hack/jenkins/release_build_and_upload.sh and KVM_BUILD_IMAGE, see also BUILD_IMAGE below
|
||||
GO_VERSION ?= 1.13.4
|
||||
|
||||
INSTALL_SIZE ?= $(shell du out/minikube-windows-amd64.exe | cut -f1)
|
||||
BUILDROOT_BRANCH ?= 2019.02.8
|
||||
BUILDROOT_BRANCH ?= 2019.02.9
|
||||
REGISTRY?=gcr.io/k8s-minikube
|
||||
|
||||
# Get git commit id
|
||||
|
@ -52,15 +52,15 @@ MINIKUBE_BUCKET ?= minikube/releases
|
|||
MINIKUBE_UPLOAD_LOCATION := gs://${MINIKUBE_BUCKET}
|
||||
MINIKUBE_RELEASES_URL=https://github.com/kubernetes/minikube/releases/download
|
||||
|
||||
KERNEL_VERSION ?= 4.19.88
|
||||
KERNEL_VERSION ?= 4.19.94
|
||||
# latest from https://github.com/golangci/golangci-lint/releases
|
||||
GOLINT_VERSION ?= v1.23.2
|
||||
GOLINT_VERSION ?= v1.23.6
|
||||
# Limit number of default jobs, to avoid the CI builds running out of memory
|
||||
GOLINT_JOBS ?= 4
|
||||
# see https://github.com/golangci/golangci-lint#memory-usage-of-golangci-lint
|
||||
GOLINT_GOGC ?= 100
|
||||
# options for lint (golangci-lint)
|
||||
GOLINT_OPTIONS = --timeout 4m \
|
||||
GOLINT_OPTIONS = --timeout 7m \
|
||||
--build-tags "${MINIKUBE_INTEGRATION_BUILD_TAGS}" \
|
||||
--enable goimports,gocritic,golint,gocyclo,misspell,nakedret,stylecheck,unconvert,unparam,dogsled \
|
||||
--exclude 'variable on range scope.*in function literal|ifElseChain' \
|
||||
|
@ -270,6 +270,10 @@ integration-versioned: out/minikube ## Trigger minikube integration testing
|
|||
test: pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go ## Trigger minikube test
|
||||
./test.sh
|
||||
|
||||
.PHONY: gotest
|
||||
gotest: $(SOURCE_GENERATED) ## Trigger minikube test
|
||||
go test -tags "$(MINIKUBE_BUILD_TAGS)" $(MINIKUBE_TEST_FILES)
|
||||
|
||||
.PHONY: extract
|
||||
extract: ## Compile extract tool
|
||||
go run cmd/extract/extract.go
|
||||
|
@ -508,14 +512,14 @@ storage-provisioner-image: out/storage-provisioner-$(GOARCH) ## Build storage-pr
|
|||
|
||||
.PHONY: kic-base-image
|
||||
kic-base-image: ## builds the base image used for kic.
|
||||
docker rmi -f $(REGISTRY)/kicbase:v$(KIC_VERSION)-snapshot || true
|
||||
docker build -f ./hack/images/kicbase.Dockerfile -t $(REGISTRY)/kicbase:v$(KIC_VERSION)-snapshot --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) --target base .
|
||||
docker rmi -f $(REGISTRY)/kicbase:$(KIC_VERSION)-snapshot || true
|
||||
docker build -f ./hack/images/kicbase.Dockerfile -t $(REGISTRY)/kicbase:$(KIC_VERSION)-snapshot --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) --target base .
|
||||
|
||||
|
||||
.PHONY: kic-preloaded-base-image
|
||||
kic-preloaded-base-image: generate-preloaded-images-tar ## builds the base image used for kic.
|
||||
docker rmi -f $(REGISTRY)/kicbase:v$(KIC_VERSION)-k8s-${KUBERNETES_VERSION} || true
|
||||
docker build -f ./hack/images/kicbase.Dockerfile -t $(REGISTRY)/kicbase:v$(KIC_VERSION)-k8s-${KUBERNETES_VERSION} --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) --build-arg KUBERNETES_VERSION=${KUBERNETES_VERSION} .
|
||||
docker rmi -f $(REGISTRY)/kicbase:$(KIC_VERSION)-k8s-${KUBERNETES_VERSION} || true
|
||||
docker build -f ./hack/images/kicbase.Dockerfile -t $(REGISTRY)/kicbase:$(KIC_VERSION)-k8s-${KUBERNETES_VERSION} --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) --build-arg KUBERNETES_VERSION=${KUBERNETES_VERSION} .
|
||||
|
||||
.PHONY: generate-preloaded-images-tar
|
||||
generate-preloaded-images-tar: out/minikube
|
||||
|
|
|
@ -191,7 +191,7 @@ func configurableFields() string {
|
|||
|
||||
// ListConfigMap list entries from config file
|
||||
func ListConfigMap(name string) ([]string, error) {
|
||||
configFile, err := config.ReadConfig(localpath.ConfigFile)
|
||||
configFile, err := config.ReadConfig(localpath.ConfigFile())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
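This hunk, and the matching ones below, change localpath.ConfigFile from a package-level value to a function call. A function can derive the path from MINIKUBE_HOME at the moment it is called rather than at package initialisation, which is what the updated test helper later in this diff relies on when it points MINIKUBE_HOME at a temporary directory. The sketch below shows one way such a function could be written, assuming the config file lives at config/config.json under the minikube home; the real pkg/minikube/localpath implementation may differ.

```go
// Sketch only: a call-time ConfigFile(), resolving MINIKUBE_HOME when invoked.
// The names MinikubeHome, MiniPath and ConfigFile appear elsewhere in this diff;
// the bodies here are assumptions, not the actual minikube implementation.
package localpath

import (
	"os"
	"path/filepath"
)

// MinikubeHome is the environment variable that overrides the home directory.
const MinikubeHome = "MINIKUBE_HOME"

// MiniPath returns the root of the minikube home directory.
func MiniPath() string {
	if home := os.Getenv(MinikubeHome); home != "" {
		return home
	}
	home, _ := os.UserHomeDir()
	return filepath.Join(home, ".minikube")
}

// ConfigFile returns the path of the global minikube config file.
func ConfigFile() string {
	return filepath.Join(MiniPath(), "config", "config.json")
}
```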
|
||||
|
@ -211,7 +211,7 @@ func AddToConfigMap(name string, images []string) error {
|
|||
return err
|
||||
}
|
||||
// Set the values
|
||||
cfg, err := config.ReadConfig(localpath.ConfigFile)
|
||||
cfg, err := config.ReadConfig(localpath.ConfigFile())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -228,7 +228,7 @@ func AddToConfigMap(name string, images []string) error {
|
|||
return err
|
||||
}
|
||||
// Write the values
|
||||
return config.WriteConfig(localpath.ConfigFile, cfg)
|
||||
return config.WriteConfig(localpath.ConfigFile(), cfg)
|
||||
}
|
||||
|
||||
// DeleteFromConfigMap deletes entries from a map in the config file
|
||||
|
@ -238,7 +238,7 @@ func DeleteFromConfigMap(name string, images []string) error {
|
|||
return err
|
||||
}
|
||||
// Set the values
|
||||
cfg, err := config.ReadConfig(localpath.ConfigFile)
|
||||
cfg, err := config.ReadConfig(localpath.ConfigFile())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -253,5 +253,5 @@ func DeleteFromConfigMap(name string, images []string) error {
|
|||
return err
|
||||
}
|
||||
// Write the values
|
||||
return config.WriteConfig(localpath.ConfigFile, cfg)
|
||||
return config.WriteConfig(localpath.ConfigFile(), cfg)
|
||||
}
|
||||
|
|
|
@ -16,24 +16,28 @@ limitations under the License.
|
|||
|
||||
package config
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGetNotFound(t *testing.T) {
|
||||
createTestConfig(t)
|
||||
_, err := Get("nonexistent")
|
||||
if err == nil {
|
||||
if err == nil || err.Error() != "specified key could not be found in config" {
|
||||
t.Fatalf("Get did not return error for unknown property")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetOK(t *testing.T) {
|
||||
createTestConfig(t)
|
||||
name := "vm-driver"
|
||||
err := Set(name, "virtualbox")
|
||||
if err != nil {
|
||||
t.Fatalf("Set returned error for property %s", name)
|
||||
t.Fatalf("Set returned error for property %s, %+v", name, err)
|
||||
}
|
||||
val, err := Get(name)
|
||||
if err != nil {
|
||||
t.Fatalf("Get returned error for property %s", name)
|
||||
t.Fatalf("Get returned error for property %s, %+v", name, err)
|
||||
}
|
||||
if val != "virtualbox" {
|
||||
t.Fatalf("Get returned %s, expected virtualbox", val)
|
||||
|
|
|
@ -65,11 +65,7 @@ var ProfileCmd = &cobra.Command{
|
|||
}
|
||||
|
||||
if !pkgConfig.ProfileExists(profile) {
|
||||
err := pkgConfig.CreateEmptyProfile(profile)
|
||||
if err != nil {
|
||||
exit.WithError("Creating a new profile failed", err)
|
||||
}
|
||||
out.SuccessT("Created a new profile : {{.profile_name}}", out.V{"profile_name": profile})
|
||||
out.FailureT("if you want to create a profile you can by this command: minikube start -p {{.profile_name}}", out.V{"profile_name": profile})
|
||||
}
|
||||
|
||||
err := Set(pkgConfig.MachineProfile, profile)
|
||||
|
@ -91,7 +87,7 @@ var ProfileCmd = &cobra.Command{
|
|||
out.ErrT(out.Sad, `Error while setting kubectl current context : {{.error}}`, out.V{"error": err})
|
||||
}
|
||||
}
|
||||
out.SuccessT("minikube profile was successfully set to {{.profile_name}}", out.V{"profile_name": profile})
|
||||
}
|
||||
out.SuccessT("minikube profile was successfully set to {{.profile_name}}", out.V{"profile_name": profile})
|
||||
},
|
||||
}
|
||||
|
|
|
@ -17,6 +17,7 @@ limitations under the License.
|
|||
package config
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
pkgConfig "k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
|
@ -51,30 +52,30 @@ func init() {
|
|||
func Set(name string, value string) error {
|
||||
s, err := findSetting(name)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrapf(err, "find settings for %q value of %q", name, value)
|
||||
}
|
||||
// Validate the new value
|
||||
err = run(name, value, s.validations)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrapf(err, "run validations for %q with value of %q", name, value)
|
||||
}
|
||||
|
||||
// Set the value
|
||||
config, err := pkgConfig.ReadConfig(localpath.ConfigFile)
|
||||
config, err := pkgConfig.ReadConfig(localpath.ConfigFile())
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrapf(err, "read config file %q", localpath.ConfigFile())
|
||||
}
|
||||
err = s.set(config, name, value)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrapf(err, "set")
|
||||
}
|
||||
|
||||
// Run any callbacks for this property
|
||||
err = run(name, value, s.callbacks)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrapf(err, "run callbacks for %q with value of %q", name, value)
|
||||
}
|
||||
|
||||
// Write the value
|
||||
return pkgConfig.WriteConfig(localpath.ConfigFile, config)
|
||||
return pkgConfig.WriteConfig(localpath.ConfigFile(), config)
|
||||
}
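The edits above replace bare `return err` with errors.Wrapf so that every failure out of Set carries the setting name and value; the updated tests further down assert the composed messages, e.g. `find settings for "nonexistent" value of "10": property name "nonexistent" not found`. The self-contained example below shows how github.com/pkg/errors builds such a message; lookupSetting is a made-up stand-in for findSetting, not code from this file.

```go
// wrap.go — illustration of message composition with github.com/pkg/errors,
// in the style of the Set() changes above. lookupSetting is hypothetical.
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func lookupSetting(name string) error {
	return fmt.Errorf("property name %q not found", name)
}

func main() {
	if err := lookupSetting("nonexistent"); err != nil {
		wrapped := errors.Wrapf(err, "find settings for %q value of %q", "nonexistent", "10")
		// Prints: find settings for "nonexistent" value of "10": property name "nonexistent" not found
		fmt.Println(wrapped)
	}
}
```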
|
||||
|
|
|
@ -19,68 +19,66 @@ package config
|
|||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/localpath"
|
||||
)
|
||||
|
||||
func TestNotFound(t *testing.T) {
|
||||
createTestProfile(t)
|
||||
createTestConfig(t)
|
||||
err := Set("nonexistent", "10")
|
||||
if err == nil {
|
||||
t.Fatalf("Set did not return error for unknown property")
|
||||
if err == nil || err.Error() != "find settings for \"nonexistent\" value of \"10\": property name \"nonexistent\" not found" {
|
||||
t.Fatalf("Set did not return error for unknown property: %+v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetNotAllowed(t *testing.T) {
|
||||
createTestProfile(t)
|
||||
createTestConfig(t)
|
||||
err := Set("vm-driver", "123456")
|
||||
if err == nil || err.Error() != "[driver \"123456\" is not supported]" {
|
||||
t.Fatalf("Set did not return error for unallowed value")
|
||||
if err == nil || err.Error() != "run validations for \"vm-driver\" with value of \"123456\": [driver \"123456\" is not supported]" {
|
||||
t.Fatalf("Set did not return error for unallowed value: %+v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetOK(t *testing.T) {
|
||||
createTestProfile(t)
|
||||
createTestConfig(t)
|
||||
err := Set("vm-driver", "virtualbox")
|
||||
defer func() {
|
||||
err = Unset("vm-driver")
|
||||
if err != nil {
|
||||
t.Errorf("failed to unset vm-driver")
|
||||
t.Errorf("failed to unset vm-driver: %+v", err)
|
||||
}
|
||||
}()
|
||||
if err != nil {
|
||||
t.Fatalf("Set returned error for valid property value")
|
||||
t.Fatalf("Set returned error for valid property value: %+v", err)
|
||||
}
|
||||
val, err := Get("vm-driver")
|
||||
if err != nil {
|
||||
t.Fatalf("Get returned error for valid property")
|
||||
t.Fatalf("Get returned error for valid property: %+v", err)
|
||||
}
|
||||
if val != "virtualbox" {
|
||||
t.Fatalf("Get returned %s, expected \"virtualbox\"", val)
|
||||
}
|
||||
}
|
||||
|
||||
func createTestProfile(t *testing.T) {
|
||||
func createTestConfig(t *testing.T) {
|
||||
t.Helper()
|
||||
td, err := ioutil.TempDir("", "profile")
|
||||
td, err := ioutil.TempDir("", "config")
|
||||
if err != nil {
|
||||
t.Fatalf("tempdir: %v", err)
|
||||
}
|
||||
|
||||
err = os.Setenv(localpath.MinikubeHome, td)
|
||||
if err != nil {
|
||||
t.Errorf("error setting up test environment. could not set %s", localpath.MinikubeHome)
|
||||
t.Fatalf("error setting up test environment. could not set %s due to %+v", localpath.MinikubeHome, err)
|
||||
}
|
||||
|
||||
// Not necessary, but it is a handy random alphanumeric
|
||||
name := filepath.Base(td)
|
||||
if err := os.MkdirAll(config.ProfileFolderPath(name), 0777); err != nil {
|
||||
t.Fatalf("error creating temporary directory")
|
||||
if err = os.MkdirAll(localpath.MakeMiniPath("config"), 0777); err != nil {
|
||||
t.Fatalf("error creating temporary directory: %+v", err)
|
||||
}
|
||||
if err := config.DefaultLoader.WriteConfigToFile(name, &config.ClusterConfig{}); err != nil {
|
||||
t.Fatalf("error creating temporary profile config: %v", err)
|
||||
|
||||
if err = os.MkdirAll(localpath.MakeMiniPath("profiles"), 0777); err != nil {
|
||||
t.Fatalf("error creating temporary profiles directory: %+v", err)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -44,10 +44,10 @@ func init() {
|
|||
|
||||
// Unset unsets a property
|
||||
func Unset(name string) error {
|
||||
m, err := pkgConfig.ReadConfig(localpath.ConfigFile)
|
||||
m, err := pkgConfig.ReadConfig(localpath.ConfigFile())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
delete(m, name)
|
||||
return pkgConfig.WriteConfig(localpath.ConfigFile, m)
|
||||
return pkgConfig.WriteConfig(localpath.ConfigFile(), m)
|
||||
}
|
||||
|
|
|
@ -23,6 +23,7 @@ import (
|
|||
)
|
||||
|
||||
func TestUnsetConfig(t *testing.T) {
|
||||
createTestConfig(t)
|
||||
propName := "cpus"
|
||||
propValue := "1"
|
||||
err := Set(propName, propValue)
|
||||
|
|
|
@ -57,7 +57,7 @@ For the list of accessible variables for the template, see the struct values her
|
|||
|
||||
// View displays the current config
|
||||
func View() error {
|
||||
cfg, err := config.ReadConfig(localpath.ConfigFile)
|
||||
cfg, err := config.ReadConfig(localpath.ConfigFile())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -32,6 +32,7 @@ import (
|
|||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
|
||||
"k8s.io/minikube/pkg/drivers/kic/oci"
|
||||
"k8s.io/minikube/pkg/minikube/cluster"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
pkg_config "k8s.io/minikube/pkg/minikube/config"
|
||||
|
@ -96,9 +97,12 @@ func runDelete(cmd *cobra.Command, args []string) {
|
|||
profileFlag := viper.GetString(config.MachineProfile)
|
||||
|
||||
validProfiles, invalidProfiles, err := pkg_config.ListProfiles()
|
||||
if err != nil {
|
||||
glog.Warningf("'error loading profiles in minikube home %q: %v", localpath.MiniPath(), err)
|
||||
}
|
||||
profilesToDelete := append(validProfiles, invalidProfiles...)
|
||||
|
||||
// If the purge flag is set, go ahead and delete the .minikube directory.
|
||||
// in the case user has more than 1 profile and runs --purge
|
||||
// to prevent abandoned VMs/containers, force user to run with delete --all
|
||||
if purge && len(profilesToDelete) > 1 && !deleteAll {
|
||||
out.ErrT(out.Notice, "Multiple minikube profiles were found - ")
|
||||
for _, p := range profilesToDelete {
|
||||
|
@ -111,12 +115,23 @@ func runDelete(cmd *cobra.Command, args []string) {
|
|||
if profileFlag != constants.DefaultMachineName {
|
||||
exit.UsageT("usage: minikube delete --all")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
exit.WithError("Error getting profiles to delete", err)
|
||||
delLabel := fmt.Sprintf("%s=%s", oci.CreatedByLabelKey, "true")
|
||||
errs := oci.DeleteAllContainersByLabel(oci.Docker, delLabel)
|
||||
if len(errs) > 0 { // it will error if there is no container to delete
|
||||
glog.Infof("error delete containers by label %q (might be okay): %+v", delLabel, err)
|
||||
}
|
||||
|
||||
errs := DeleteProfiles(profilesToDelete)
|
||||
errs = oci.DeleteAllVolumesByLabel(oci.Docker, delLabel)
|
||||
if len(errs) > 0 { // it will not error if there is nothing to delete
|
||||
glog.Warningf("error delete volumes by label %q (might be okay): %+v", delLabel, errs)
|
||||
}
|
||||
|
||||
errs = oci.PruneAllVolumesByLabel(oci.Docker, delLabel)
|
||||
if len(errs) > 0 { // it will not error if there is nothing to delete
|
||||
glog.Warningf("error pruning volumes by label %q (might be okay): %+v", delLabel, errs)
|
||||
}
|
||||
|
||||
errs = DeleteProfiles(profilesToDelete)
|
||||
if len(errs) > 0 {
|
||||
HandleDeletionErrors(errs)
|
||||
} else {
|
||||
|
@ -178,6 +193,21 @@ func DeleteProfiles(profiles []*pkg_config.Profile) []error {
|
|||
func deleteProfile(profile *pkg_config.Profile) error {
|
||||
viper.Set(pkg_config.MachineProfile, profile.Name)
|
||||
|
||||
delLabel := fmt.Sprintf("%s=%s", oci.ProfileLabelKey, profile.Name)
|
||||
errs := oci.DeleteAllContainersByLabel(oci.Docker, delLabel)
|
||||
if errs != nil { // it will error if there is no container to delete
|
||||
glog.Infof("error deleting containers for %s (might be okay):\n%v", profile.Name, errs)
|
||||
}
|
||||
errs = oci.DeleteAllVolumesByLabel(oci.Docker, delLabel)
|
||||
if errs != nil { // it will not error if there is nothing to delete
|
||||
glog.Warningf("error deleting volumes (might be okay).\nTo see the list of volumes run: 'docker volume ls'\n:%v", errs)
|
||||
}
|
||||
|
||||
errs = oci.PruneAllVolumesByLabel(oci.Docker, delLabel)
|
||||
if len(errs) > 0 { // it will not error if there is nothing to delete
|
||||
glog.Warningf("error pruning volume (might be okay):\n%v", errs)
|
||||
}
|
||||
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("error getting client %v", err))
|
||||
|
|
|
@ -95,7 +95,7 @@ set -gx DOCKER_CERT_PATH "/certs"
|
|||
set -gx MINIKUBE_ACTIVE_DOCKERD "fish"
|
||||
|
||||
# To point your shell to minikube's docker-daemon, run:
|
||||
# eval (minikube -p fish docker-env)
|
||||
# minikube -p fish docker-env | source
|
||||
`,
|
||||
`set -e DOCKER_TLS_VERIFY
|
||||
set -e DOCKER_HOST
|
||||
|
|
|
@ -21,6 +21,7 @@ import (
|
|||
"net"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
@ -151,6 +152,15 @@ var mountCmd = &cobra.Command{
|
|||
cfg.Options[parts[0]] = parts[1]
|
||||
}
|
||||
|
||||
// An escape valve to allow future hackers to try NFS, VirtFS, or other FS types.
|
||||
if !supportedFilesystems[cfg.Type] {
|
||||
out.T(out.WarningType, "{{.type}} is not yet a supported filesystem. We will try anyways!", out.V{"type": cfg.Type})
|
||||
}
|
||||
|
||||
bindIP := ip.String() // the ip to listen on the user's host machine
|
||||
if driver.IsKIC(host.Driver.DriverName()) && runtime.GOOS != "linux" {
|
||||
bindIP = "127.0.0.1"
|
||||
}
|
||||
out.T(out.Mounting, "Mounting host path {{.sourcePath}} into VM as {{.destinationPath}} ...", out.V{"sourcePath": hostPath, "destinationPath": vmPath})
|
||||
out.T(out.Option, "Mount type: {{.name}}", out.V{"type": cfg.Type})
|
||||
out.T(out.Option, "User ID: {{.userID}}", out.V{"userID": cfg.UID})
|
||||
|
@ -159,18 +169,14 @@ var mountCmd = &cobra.Command{
|
|||
out.T(out.Option, "Message Size: {{.size}}", out.V{"size": cfg.MSize})
|
||||
out.T(out.Option, "Permissions: {{.octalMode}} ({{.writtenMode}})", out.V{"octalMode": fmt.Sprintf("%o", cfg.Mode), "writtenMode": cfg.Mode})
|
||||
out.T(out.Option, "Options: {{.options}}", out.V{"options": cfg.Options})
|
||||
|
||||
// An escape valve to allow future hackers to try NFS, VirtFS, or other FS types.
|
||||
if !supportedFilesystems[cfg.Type] {
|
||||
out.T(out.WarningType, "{{.type}} is not yet a supported filesystem. We will try anyways!", out.V{"type": cfg.Type})
|
||||
}
|
||||
out.T(out.Option, "Bind Address: {{.Address}}", out.V{"Address": net.JoinHostPort(bindIP, fmt.Sprint(port))})
|
||||
|
||||
var wg sync.WaitGroup
|
||||
if cfg.Type == nineP {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
out.T(out.Fileserver, "Userspace file server: ")
|
||||
ufs.StartServer(net.JoinHostPort(ip.String(), strconv.Itoa(port)), debugVal, hostPath)
|
||||
ufs.StartServer(net.JoinHostPort(bindIP, strconv.Itoa(port)), debugVal, hostPath)
|
||||
out.T(out.Stopped, "Userspace file server is shutdown")
|
||||
wg.Done()
|
||||
}()
|
||||
|
|
|
@ -233,7 +233,7 @@ func init() {
|
|||
|
||||
// initConfig reads in config file and ENV variables if set.
|
||||
func initConfig() {
|
||||
configPath := localpath.ConfigFile
|
||||
configPath := localpath.ConfigFile()
|
||||
viper.SetConfigFile(configPath)
|
||||
viper.SetConfigType("json")
|
||||
if err := viper.ReadInConfig(); err != nil {
|
||||
|
|
|
@ -328,9 +328,12 @@ func runStart(cmd *cobra.Command, args []string) {
|
|||
ssh.SetDefaultClient(ssh.External)
|
||||
}
|
||||
|
||||
existingAddons := map[string]bool{}
|
||||
if existing != nil && existing.Addons != nil {
|
||||
existingAddons = existing.Addons
|
||||
var existingAddons map[string]bool
|
||||
if viper.GetBool(installAddons) {
|
||||
existingAddons = map[string]bool{}
|
||||
if existing != nil && existing.Addons != nil {
|
||||
existingAddons = existing.Addons
|
||||
}
|
||||
}
|
||||
kubeconfig, err := node.Start(mc, n, true, existingAddons)
|
||||
if err != nil {
|
||||
|
@ -665,7 +668,7 @@ func validateFlags(cmd *cobra.Command, drvName string) {
|
|||
validateDiskSize()
|
||||
validateMemorySize()
|
||||
|
||||
if !driver.HasResourceLimits(drvName) { // both podman and none need root and they both cant specify resources
|
||||
if !driver.HasResourceLimits(drvName) {
|
||||
if cmd.Flags().Changed(cpus) {
|
||||
out.WarningT("The '{{.name}}' driver does not respect the --cpus flag", out.V{"name": drvName})
|
||||
}
|
||||
|
|
|
@ -44,10 +44,16 @@ var statusFormat string
|
|||
var output string
|
||||
|
||||
const (
|
||||
// Additional states used by kubeconfig
|
||||
Configured = "Configured" // ~state.Saved
|
||||
// # Additional states used by kubeconfig:
|
||||
|
||||
// Configured means configured
|
||||
Configured = "Configured" // ~state.Saved
|
||||
// Misconfigured means misconfigured
|
||||
Misconfigured = "Misconfigured" // ~state.Error
|
||||
// Additional states used for clarity
|
||||
|
||||
// # Additional states used for clarity:
|
||||
|
||||
// Nonexistent means nonexistent
|
||||
Nonexistent = "Nonexistent" // ~state.None
|
||||
)
|
||||
|
||||
|
|
|
@ -24,7 +24,7 @@ spec:
|
|||
hostPID: true
|
||||
containers:
|
||||
- name: gvisor
|
||||
image: {{default "gcr.io/k8s-minikube" .ImageRepository}}/gvisor-addon:2
|
||||
image: {{default "gcr.io/k8s-minikube" .ImageRepository}}/gvisor-addon:3
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
|
|
|
@ -18,7 +18,7 @@ spec:
|
|||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
spec:
|
||||
containers:
|
||||
- image: registry.hub.docker.com/library/registry:2.6.1
|
||||
- image: registry.hub.docker.com/library/registry:2.7.1
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: registry
|
||||
ports:
|
||||
|
|
|
@ -14,5 +14,6 @@
|
|||
|
||||
# Need an image with chroot
|
||||
FROM alpine:3
|
||||
RUN apk -U add ca-certificates
|
||||
COPY out/gvisor-addon /gvisor-addon
|
||||
CMD ["/gvisor-addon"]
|
||||
|
|
|
@ -233,6 +233,7 @@ CONFIG_IP_VS_NQ=m
|
|||
CONFIG_IP_VS_NFCT=y
|
||||
CONFIG_NF_LOG_ARP=m
|
||||
CONFIG_IP_NF_IPTABLES=y
|
||||
CONFIG_IP_NF_MATCH_RPFILTER=y
|
||||
CONFIG_IP_NF_FILTER=y
|
||||
CONFIG_IP_NF_TARGET_REJECT=y
|
||||
CONFIG_IP_NF_NAT=m
|
||||
|
|
|
@ -0,0 +1,470 @@
|
|||
From 398f9301fd2db3b15407a62e90d416914f94c669 Mon Sep 17 00:00:00 2001
|
||||
From: Yu Watanabe <watanabe.yu+github@gmail.com>
|
||||
Date: Mon, 11 Feb 2019 21:25:13 +0900
|
||||
Subject: [PATCH 1/3] network: minor coding style update
|
||||
|
||||
(cherry picked from commit 860e636cf6855260634ef2f7f52af4635b1271c3)
|
||||
---
|
||||
src/network/networkd-dhcp4.c | 16 +++++++---------
|
||||
1 file changed, 7 insertions(+), 9 deletions(-)
|
||||
|
||||
diff --git a/src/network/networkd-dhcp4.c b/src/network/networkd-dhcp4.c
|
||||
index 980d49e..61f767a 100644
|
||||
--- a/src/network/networkd-dhcp4.c
|
||||
+++ b/src/network/networkd-dhcp4.c
|
||||
@@ -70,7 +70,7 @@ static int link_set_dhcp_routes(Link *link) {
|
||||
/* When the interface is part of an VRF use the VRFs routing table, unless
|
||||
* there is a another table specified. */
|
||||
table = link->network->dhcp_route_table;
|
||||
- if (!link->network->dhcp_route_table_set && link->network->vrf != NULL)
|
||||
+ if (!link->network->dhcp_route_table_set && link->network->vrf)
|
||||
table = VRF(link->network->vrf)->table;
|
||||
|
||||
r = sd_dhcp_lease_get_address(link->dhcp_lease, &address);
|
||||
@@ -135,14 +135,7 @@ static int link_set_dhcp_routes(Link *link) {
|
||||
log_link_warning(link, "Classless static routes received from DHCP server: ignoring static-route option and router option");
|
||||
|
||||
if (r >= 0 && !classless_route) {
|
||||
- _cleanup_(route_freep) Route *route = NULL;
|
||||
- _cleanup_(route_freep) Route *route_gw = NULL;
|
||||
-
|
||||
- r = route_new(&route);
|
||||
- if (r < 0)
|
||||
- return log_link_error_errno(link, r, "Could not allocate route: %m");
|
||||
-
|
||||
- route->protocol = RTPROT_DHCP;
|
||||
+ _cleanup_(route_freep) Route *route = NULL, *route_gw = NULL;
|
||||
|
||||
r = route_new(&route_gw);
|
||||
if (r < 0)
|
||||
@@ -166,9 +159,14 @@ static int link_set_dhcp_routes(Link *link) {
|
||||
|
||||
link->dhcp4_messages++;
|
||||
|
||||
+ r = route_new(&route);
|
||||
+ if (r < 0)
|
||||
+ return log_link_error_errno(link, r, "Could not allocate route: %m");
|
||||
+
|
||||
route->family = AF_INET;
|
||||
route->gw.in = gateway;
|
||||
route->prefsrc.in = address;
|
||||
+ route->protocol = RTPROT_DHCP;
|
||||
route->priority = link->network->dhcp_route_metric;
|
||||
route->table = table;
|
||||
|
||||
--
|
||||
2.7.4
|
||||
|
||||
From 8fac545db140bd5c29113d13caa2d0d7cb8bb49b Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Haller <thaller@redhat.com>
|
||||
Date: Fri, 14 Dec 2018 11:10:57 +0100
|
||||
Subject: [PATCH 2/3] dhcp: handle multiple addresses for "Router" (option 3)
|
||||
in DHCP library
|
||||
|
||||
The Router DHCP option may contain a list of one or more
|
||||
routers ([1]). Extend the API of sd_dhcp_lease to return a
|
||||
list instead of only the first.
|
||||
|
||||
Note that networkd still only uses the first router (if present).
|
||||
Aside from extending the internal API of the DHCP client, there
|
||||
is almost no change in behavior. The only visible difference in
|
||||
behavior is that the "ROUTER" variable in the lease file is now a
|
||||
list of addresses.
|
||||
|
||||
Note how RFC 2132 does not define certain IP addresses as invalid for the
|
||||
router option. Still, previously sd_dhcp_lease_get_router() would never
|
||||
return a "0.0.0.0" address. In fact, the previous API could not
|
||||
differenciate whether no router option was present, whether it
|
||||
was invalid, or whether its first router was "0.0.0.0". No longer let
|
||||
the DHCP client library impose additional restrictions that are not
|
||||
part of RFC. Instead, the caller should handle this. The patch does
|
||||
that, and networkd only consideres the first router entry if it is not
|
||||
"0.0.0.0".
|
||||
|
||||
[1] https://tools.ietf.org/html/rfc2132#section-3.5
|
||||
|
||||
(cherry picked from commit f8862395e8f802e4106a07ceaaf02b6a1faa5a6d)
|
||||
---
|
||||
src/libsystemd-network/dhcp-lease-internal.h | 4 ++-
|
||||
src/libsystemd-network/sd-dhcp-lease.c | 50 +++++++++++++++-------------
|
||||
src/libsystemd-network/test-dhcp-client.c | 7 ++--
|
||||
src/network/networkd-dhcp4.c | 37 ++++++++++----------
|
||||
src/systemd/sd-dhcp-lease.h | 2 +-
|
||||
5 files changed, 54 insertions(+), 46 deletions(-)
|
||||
|
||||
diff --git a/src/libsystemd-network/dhcp-lease-internal.h b/src/libsystemd-network/dhcp-lease-internal.h
|
||||
index 9d245a9..122042a 100644
|
||||
--- a/src/libsystemd-network/dhcp-lease-internal.h
|
||||
+++ b/src/libsystemd-network/dhcp-lease-internal.h
|
||||
@@ -41,7 +41,6 @@ struct sd_dhcp_lease {
|
||||
/* each 0 if unset */
|
||||
be32_t address;
|
||||
be32_t server_address;
|
||||
- be32_t router;
|
||||
be32_t next_server;
|
||||
|
||||
bool have_subnet_mask;
|
||||
@@ -50,6 +49,9 @@ struct sd_dhcp_lease {
|
||||
bool have_broadcast;
|
||||
be32_t broadcast;
|
||||
|
||||
+ struct in_addr *router;
|
||||
+ size_t router_size;
|
||||
+
|
||||
struct in_addr *dns;
|
||||
size_t dns_size;
|
||||
|
||||
diff --git a/src/libsystemd-network/sd-dhcp-lease.c b/src/libsystemd-network/sd-dhcp-lease.c
|
||||
index 13badbf..406188c 100644
|
||||
--- a/src/libsystemd-network/sd-dhcp-lease.c
|
||||
+++ b/src/libsystemd-network/sd-dhcp-lease.c
|
||||
@@ -151,15 +151,15 @@ int sd_dhcp_lease_get_root_path(sd_dhcp_lease *lease, const char **root_path) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
-int sd_dhcp_lease_get_router(sd_dhcp_lease *lease, struct in_addr *addr) {
|
||||
+int sd_dhcp_lease_get_router(sd_dhcp_lease *lease, const struct in_addr **addr) {
|
||||
assert_return(lease, -EINVAL);
|
||||
assert_return(addr, -EINVAL);
|
||||
|
||||
- if (lease->router == 0)
|
||||
+ if (lease->router_size <= 0)
|
||||
return -ENODATA;
|
||||
|
||||
- addr->s_addr = lease->router;
|
||||
- return 0;
|
||||
+ *addr = lease->router;
|
||||
+ return (int) lease->router_size;
|
||||
}
|
||||
|
||||
int sd_dhcp_lease_get_netmask(sd_dhcp_lease *lease, struct in_addr *addr) {
|
||||
@@ -261,6 +261,7 @@ static sd_dhcp_lease *dhcp_lease_free(sd_dhcp_lease *lease) {
|
||||
}
|
||||
|
||||
free(lease->root_path);
|
||||
+ free(lease->router);
|
||||
free(lease->timezone);
|
||||
free(lease->hostname);
|
||||
free(lease->domainname);
|
||||
@@ -387,7 +388,7 @@ static void filter_bogus_addresses(struct in_addr *addresses, size_t *n) {
|
||||
*n = j;
|
||||
}
|
||||
|
||||
-static int lease_parse_in_addrs(const uint8_t *option, size_t len, struct in_addr **ret, size_t *n_ret) {
|
||||
+static int lease_parse_in_addrs(const uint8_t *option, size_t len, bool filter_bogus, struct in_addr **ret, size_t *n_ret) {
|
||||
assert(option);
|
||||
assert(ret);
|
||||
assert(n_ret);
|
||||
@@ -408,7 +409,8 @@ static int lease_parse_in_addrs(const uint8_t *option, size_t len, struct in_add
|
||||
if (!addresses)
|
||||
return -ENOMEM;
|
||||
|
||||
- filter_bogus_addresses(addresses, &n_addresses);
|
||||
+ if (filter_bogus)
|
||||
+ filter_bogus_addresses(addresses, &n_addresses);
|
||||
|
||||
free(*ret);
|
||||
*ret = addresses;
|
||||
@@ -554,21 +556,19 @@ int dhcp_lease_parse_options(uint8_t code, uint8_t len, const void *option, void
|
||||
break;
|
||||
|
||||
case SD_DHCP_OPTION_ROUTER:
|
||||
- if (len >= 4) {
|
||||
- r = lease_parse_be32(option, 4, &lease->router);
|
||||
- if (r < 0)
|
||||
- log_debug_errno(r, "Failed to parse router address, ignoring: %m");
|
||||
- }
|
||||
+ r = lease_parse_in_addrs(option, len, false, &lease->router, &lease->router_size);
|
||||
+ if (r < 0)
|
||||
+ log_debug_errno(r, "Failed to parse router addresses, ignoring: %m");
|
||||
break;
|
||||
|
||||
case SD_DHCP_OPTION_DOMAIN_NAME_SERVER:
|
||||
- r = lease_parse_in_addrs(option, len, &lease->dns, &lease->dns_size);
|
||||
+ r = lease_parse_in_addrs(option, len, true, &lease->dns, &lease->dns_size);
|
||||
if (r < 0)
|
||||
log_debug_errno(r, "Failed to parse DNS server, ignoring: %m");
|
||||
break;
|
||||
|
||||
case SD_DHCP_OPTION_NTP_SERVER:
|
||||
- r = lease_parse_in_addrs(option, len, &lease->ntp, &lease->ntp_size);
|
||||
+ r = lease_parse_in_addrs(option, len, true, &lease->ntp, &lease->ntp_size);
|
||||
if (r < 0)
|
||||
log_debug_errno(r, "Failed to parse NTP server, ignoring: %m");
|
||||
break;
|
||||
@@ -820,7 +820,6 @@ int dhcp_lease_new(sd_dhcp_lease **ret) {
|
||||
if (!lease)
|
||||
return -ENOMEM;
|
||||
|
||||
- lease->router = INADDR_ANY;
|
||||
lease->n_ref = 1;
|
||||
|
||||
*ret = lease;
|
||||
@@ -863,9 +862,12 @@ int dhcp_lease_save(sd_dhcp_lease *lease, const char *lease_file) {
|
||||
if (r >= 0)
|
||||
fprintf(f, "NETMASK=%s\n", inet_ntoa(address));
|
||||
|
||||
- r = sd_dhcp_lease_get_router(lease, &address);
|
||||
- if (r >= 0)
|
||||
- fprintf(f, "ROUTER=%s\n", inet_ntoa(address));
|
||||
+ r = sd_dhcp_lease_get_router(lease, &addresses);
|
||||
+ if (r > 0) {
|
||||
+ fputs("ROUTER=", f);
|
||||
+ serialize_in_addrs(f, addresses, r);
|
||||
+ fputc('\n', f);
|
||||
+ }
|
||||
|
||||
r = sd_dhcp_lease_get_server_identifier(lease, &address);
|
||||
if (r >= 0)
|
||||
@@ -899,14 +901,14 @@ int dhcp_lease_save(sd_dhcp_lease *lease, const char *lease_file) {
|
||||
if (r > 0) {
|
||||
fputs("DNS=", f);
|
||||
serialize_in_addrs(f, addresses, r);
|
||||
- fputs("\n", f);
|
||||
+ fputc('\n', f);
|
||||
}
|
||||
|
||||
r = sd_dhcp_lease_get_ntp(lease, &addresses);
|
||||
if (r > 0) {
|
||||
fputs("NTP=", f);
|
||||
serialize_in_addrs(f, addresses, r);
|
||||
- fputs("\n", f);
|
||||
+ fputc('\n', f);
|
||||
}
|
||||
|
||||
r = sd_dhcp_lease_get_domainname(lease, &string);
|
||||
@@ -917,7 +919,7 @@ int dhcp_lease_save(sd_dhcp_lease *lease, const char *lease_file) {
|
||||
if (r > 0) {
|
||||
fputs("DOMAIN_SEARCH_LIST=", f);
|
||||
fputstrv(f, search_domains, NULL, NULL);
|
||||
- fputs("\n", f);
|
||||
+ fputc('\n', f);
|
||||
}
|
||||
|
||||
r = sd_dhcp_lease_get_hostname(lease, &string);
|
||||
@@ -1080,9 +1082,11 @@ int dhcp_lease_load(sd_dhcp_lease **ret, const char *lease_file) {
|
||||
}
|
||||
|
||||
if (router) {
|
||||
- r = inet_pton(AF_INET, router, &lease->router);
|
||||
- if (r <= 0)
|
||||
- log_debug("Failed to parse router %s, ignoring.", router);
|
||||
+ r = deserialize_in_addrs(&lease->router, router);
|
||||
+ if (r < 0)
|
||||
+ log_debug_errno(r, "Failed to deserialize router addresses %s, ignoring: %m", router);
|
||||
+ else
|
||||
+ lease->router_size = r;
|
||||
}
|
||||
|
||||
if (netmask) {
|
||||
diff --git a/src/libsystemd-network/test-dhcp-client.c b/src/libsystemd-network/test-dhcp-client.c
|
||||
index fe6788d..0431e2c 100644
|
||||
--- a/src/libsystemd-network/test-dhcp-client.c
|
||||
+++ b/src/libsystemd-network/test-dhcp-client.c
|
||||
@@ -423,6 +423,7 @@ static void test_addr_acq_acquired(sd_dhcp_client *client, int event,
|
||||
sd_event *e = userdata;
|
||||
sd_dhcp_lease *lease;
|
||||
struct in_addr addr;
|
||||
+ const struct in_addr *addrs;
|
||||
|
||||
assert_se(client);
|
||||
assert_se(event == SD_DHCP_CLIENT_EVENT_IP_ACQUIRE);
|
||||
@@ -438,9 +439,9 @@ static void test_addr_acq_acquired(sd_dhcp_client *client, int event,
|
||||
assert_se(memcmp(&addr.s_addr, &test_addr_acq_ack[285],
|
||||
sizeof(addr.s_addr)) == 0);
|
||||
|
||||
- assert_se(sd_dhcp_lease_get_router(lease, &addr) >= 0);
|
||||
- assert_se(memcmp(&addr.s_addr, &test_addr_acq_ack[308],
|
||||
- sizeof(addr.s_addr)) == 0);
|
||||
+ assert_se(sd_dhcp_lease_get_router(lease, &addrs) == 1);
|
||||
+ assert_se(memcmp(&addrs[0].s_addr, &test_addr_acq_ack[308],
|
||||
+ sizeof(addrs[0].s_addr)) == 0);
|
||||
|
||||
if (verbose)
|
||||
printf(" DHCP address acquired\n");
|
||||
diff --git a/src/network/networkd-dhcp4.c b/src/network/networkd-dhcp4.c
|
||||
index 61f767a..56512e5 100644
|
||||
--- a/src/network/networkd-dhcp4.c
|
||||
+++ b/src/network/networkd-dhcp4.c
|
||||
@@ -52,7 +52,8 @@ static int route_scope_from_address(const Route *route, const struct in_addr *se
|
||||
static int link_set_dhcp_routes(Link *link) {
|
||||
_cleanup_free_ sd_dhcp_route **static_routes = NULL;
|
||||
bool classless_route = false, static_route = false;
|
||||
- struct in_addr gateway, address;
|
||||
+ const struct in_addr *router;
|
||||
+ struct in_addr address;
|
||||
int r, n, i;
|
||||
uint32_t table;
|
||||
|
||||
@@ -123,18 +124,18 @@ static int link_set_dhcp_routes(Link *link) {
|
||||
link->dhcp4_messages++;
|
||||
}
|
||||
|
||||
- r = sd_dhcp_lease_get_router(link->dhcp_lease, &gateway);
|
||||
- if (r == -ENODATA)
|
||||
- log_link_info_errno(link, r, "DHCP: No gateway received from DHCP server: %m");
|
||||
- else if (r < 0)
|
||||
+ r = sd_dhcp_lease_get_router(link->dhcp_lease, &router);
|
||||
+ if (r < 0 && r != -ENODATA)
|
||||
log_link_warning_errno(link, r, "DHCP error: could not get gateway: %m");
|
||||
+ else if (r <= 0 || in4_addr_is_null(&router[0]))
|
||||
+ log_link_info_errno(link, r, "DHCP: No gateway received from DHCP server: %m");
|
||||
|
||||
/* According to RFC 3442: If the DHCP server returns both a Classless Static Routes option and
|
||||
a Router option, the DHCP client MUST ignore the Router option. */
|
||||
if (classless_route && static_route)
|
||||
log_link_warning(link, "Classless static routes received from DHCP server: ignoring static-route option and router option");
|
||||
|
||||
- if (r >= 0 && !classless_route) {
|
||||
+ if (r > 0 && !classless_route && !in4_addr_is_null(&router[0])) {
|
||||
_cleanup_(route_freep) Route *route = NULL, *route_gw = NULL;
|
||||
|
||||
r = route_new(&route_gw);
|
||||
@@ -145,7 +146,7 @@ static int link_set_dhcp_routes(Link *link) {
|
||||
* route for the gw host so that we can route no matter the
|
||||
* netmask or existing kernel route tables. */
|
||||
route_gw->family = AF_INET;
|
||||
- route_gw->dst.in = gateway;
|
||||
+ route_gw->dst.in = router[0];
|
||||
route_gw->dst_prefixlen = 32;
|
||||
route_gw->prefsrc.in = address;
|
||||
route_gw->scope = RT_SCOPE_LINK;
|
||||
@@ -164,7 +165,7 @@ static int link_set_dhcp_routes(Link *link) {
|
||||
return log_link_error_errno(link, r, "Could not allocate route: %m");
|
||||
|
||||
route->family = AF_INET;
|
||||
- route->gw.in = gateway;
|
||||
+ route->gw.in = router[0];
|
||||
route->prefsrc.in = address;
|
||||
route->protocol = RTPROT_DHCP;
|
||||
route->priority = link->network->dhcp_route_metric;
|
||||
@@ -185,9 +186,9 @@ static int link_set_dhcp_routes(Link *link) {
|
||||
|
||||
static int dhcp_lease_lost(Link *link) {
|
||||
_cleanup_(address_freep) Address *address = NULL;
|
||||
+ const struct in_addr *router;
|
||||
struct in_addr addr;
|
||||
struct in_addr netmask;
|
||||
- struct in_addr gateway;
|
||||
unsigned prefixlen = 0;
|
||||
int r;
|
||||
|
||||
@@ -220,15 +221,15 @@ static int dhcp_lease_lost(Link *link) {
|
||||
|
||||
r = address_new(&address);
|
||||
if (r >= 0) {
|
||||
- r = sd_dhcp_lease_get_router(link->dhcp_lease, &gateway);
|
||||
- if (r >= 0) {
|
||||
+ r = sd_dhcp_lease_get_router(link->dhcp_lease, &router);
|
||||
+ if (r > 0 && !in4_addr_is_null(&router[0])) {
|
||||
_cleanup_(route_freep) Route *route_gw = NULL;
|
||||
_cleanup_(route_freep) Route *route = NULL;
|
||||
|
||||
r = route_new(&route_gw);
|
||||
if (r >= 0) {
|
||||
route_gw->family = AF_INET;
|
||||
- route_gw->dst.in = gateway;
|
||||
+ route_gw->dst.in = router[0];
|
||||
route_gw->dst_prefixlen = 32;
|
||||
route_gw->scope = RT_SCOPE_LINK;
|
||||
|
||||
@@ -238,7 +239,7 @@ static int dhcp_lease_lost(Link *link) {
|
||||
r = route_new(&route);
|
||||
if (r >= 0) {
|
||||
route->family = AF_INET;
|
||||
- route->gw.in = gateway;
|
||||
+ route->gw.in = router[0];
|
||||
|
||||
route_remove(route, link, NULL);
|
||||
}
|
||||
@@ -397,10 +398,10 @@ static int dhcp_lease_renew(sd_dhcp_client *client, Link *link) {
|
||||
}
|
||||
|
||||
static int dhcp_lease_acquired(sd_dhcp_client *client, Link *link) {
|
||||
+ const struct in_addr *router;
|
||||
sd_dhcp_lease *lease;
|
||||
struct in_addr address;
|
||||
struct in_addr netmask;
|
||||
- struct in_addr gateway;
|
||||
unsigned prefixlen;
|
||||
uint32_t lifetime = CACHE_INFO_INFINITY_LIFE_TIME;
|
||||
int r;
|
||||
@@ -422,20 +423,20 @@ static int dhcp_lease_acquired(sd_dhcp_client *client, Link *link) {
|
||||
|
||||
prefixlen = in4_addr_netmask_to_prefixlen(&netmask);
|
||||
|
||||
- r = sd_dhcp_lease_get_router(lease, &gateway);
|
||||
+ r = sd_dhcp_lease_get_router(lease, &router);
|
||||
if (r < 0 && r != -ENODATA)
|
||||
return log_link_error_errno(link, r, "DHCP error: Could not get gateway: %m");
|
||||
|
||||
- if (r >= 0)
|
||||
+ if (r > 0 && !in4_addr_is_null(&router[0]))
|
||||
log_struct(LOG_INFO,
|
||||
LOG_LINK_INTERFACE(link),
|
||||
LOG_LINK_MESSAGE(link, "DHCPv4 address %u.%u.%u.%u/%u via %u.%u.%u.%u",
|
||||
ADDRESS_FMT_VAL(address),
|
||||
prefixlen,
|
||||
- ADDRESS_FMT_VAL(gateway)),
|
||||
+ ADDRESS_FMT_VAL(router[0])),
|
||||
"ADDRESS=%u.%u.%u.%u", ADDRESS_FMT_VAL(address),
|
||||
"PREFIXLEN=%u", prefixlen,
|
||||
- "GATEWAY=%u.%u.%u.%u", ADDRESS_FMT_VAL(gateway));
|
||||
+ "GATEWAY=%u.%u.%u.%u", ADDRESS_FMT_VAL(router[0]));
|
||||
else
|
||||
log_struct(LOG_INFO,
|
||||
LOG_LINK_INTERFACE(link),
|
||||
diff --git a/src/systemd/sd-dhcp-lease.h b/src/systemd/sd-dhcp-lease.h
|
||||
index 4875f10..d299c79 100644
|
||||
--- a/src/systemd/sd-dhcp-lease.h
|
||||
+++ b/src/systemd/sd-dhcp-lease.h
|
||||
@@ -39,7 +39,7 @@ int sd_dhcp_lease_get_t1(sd_dhcp_lease *lease, uint32_t *t1);
|
||||
int sd_dhcp_lease_get_t2(sd_dhcp_lease *lease, uint32_t *t2);
|
||||
int sd_dhcp_lease_get_broadcast(sd_dhcp_lease *lease, struct in_addr *addr);
|
||||
int sd_dhcp_lease_get_netmask(sd_dhcp_lease *lease, struct in_addr *addr);
|
||||
-int sd_dhcp_lease_get_router(sd_dhcp_lease *lease, struct in_addr *addr);
|
||||
+int sd_dhcp_lease_get_router(sd_dhcp_lease *lease, const struct in_addr **addr);
|
||||
int sd_dhcp_lease_get_next_server(sd_dhcp_lease *lease, struct in_addr *addr);
|
||||
int sd_dhcp_lease_get_server_identifier(sd_dhcp_lease *lease, struct in_addr *addr);
|
||||
int sd_dhcp_lease_get_dns(sd_dhcp_lease *lease, const struct in_addr **addr);
|
||||
--
|
||||
2.7.4
|
||||
|
||||
From da9adbb9539bee8886bb680e70616eac8281acd5 Mon Sep 17 00:00:00 2001
From: Yu Watanabe <watanabe.yu+github@gmail.com>
Date: Tue, 19 Feb 2019 15:09:28 +0900
Subject: [PATCH 3/3] network: do not log wrong error cause

If sd_dhcp_lease_get_router() returns a positive value and the first
router is null, then an invalid error cause was logged.

Follow-up for f8862395e8f802e4106a07ceaaf02b6a1faa5a6d.

(cherry picked from commit 825ace96b1076ac367d2962e3979f62954145812)
---
 src/network/networkd-dhcp4.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

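As an annotated sketch of the hunk below (not a separate change), the corrected branching with the reasoning spelled out; log_gateway_status is a hypothetical wrapper and the internal networkd headers are assumed.

/* Annotated sketch of the hunk below (assumes networkd's internal headers,
 * e.g. networkd-link.h and in-addr-util.h, for Link, IN_SET,
 * in4_addr_is_null and the log_link_* macros). The point of the fix: when r
 * is 0 or -ENODATA there is no errno worth printing, and formatting "%m"
 * from a non-negative r produced an unrelated error string, which is the
 * "wrong error cause" named in the subject. */
static void log_gateway_status(Link *link) {
        const struct in_addr *router;
        int r;

        r = sd_dhcp_lease_get_router(link->dhcp_lease, &router);
        if (IN_SET(r, 0, -ENODATA))
                /* option absent: informational, no errno to print */
                log_link_info(link, "DHCP: No gateway received from DHCP server.");
        else if (r < 0)
                /* a real failure: keep the errno-carrying warning */
                log_link_warning_errno(link, r, "DHCP error: could not get gateway: %m");
        else if (in4_addr_is_null(&router[0]))
                /* gateway present but 0.0.0.0: informational, again no errno */
                log_link_info(link, "DHCP: Received gateway is null.");
}
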
diff --git a/src/network/networkd-dhcp4.c b/src/network/networkd-dhcp4.c
|
||||
index 56512e5..29ad323 100644
|
||||
--- a/src/network/networkd-dhcp4.c
|
||||
+++ b/src/network/networkd-dhcp4.c
|
||||
@@ -125,10 +125,12 @@ static int link_set_dhcp_routes(Link *link) {
|
||||
}
|
||||
|
||||
r = sd_dhcp_lease_get_router(link->dhcp_lease, &router);
|
||||
- if (r < 0 && r != -ENODATA)
|
||||
+ if (IN_SET(r, 0, -ENODATA))
|
||||
+ log_link_info(link, "DHCP: No gateway received from DHCP server.");
|
||||
+ else if (r < 0)
|
||||
log_link_warning_errno(link, r, "DHCP error: could not get gateway: %m");
|
||||
- else if (r <= 0 || in4_addr_is_null(&router[0]))
|
||||
- log_link_info_errno(link, r, "DHCP: No gateway received from DHCP server: %m");
|
||||
+ else if (in4_addr_is_null(&router[0]))
|
||||
+ log_link_info(link, "DHCP: Received gateway is null.");
|
||||
|
||||
/* According to RFC 3442: If the DHCP server returns both a Classless Static Routes option and
|
||||
a Router option, the DHCP client MUST ignore the Router option. */
|
||||
--
|
||||
2.7.4
|
||||
|
|
@ -3,6 +3,7 @@ Name=eth*
|
|||
|
||||
[Network]
|
||||
DHCP=ipv4
|
||||
LinkLocalAddressing=no
|
||||
|
||||
[DHCP]
|
||||
ClientIdentifier=mac
|
||||
|
|
|
@ -67,3 +67,4 @@ BR2_PACKAGE_SYSTEMD_TIMESYNCD=y
|
|||
BR2_PACKAGE_STRACE=y
|
||||
BR2_PACKAGE_SYSSTAT=y
|
||||
BR2_PACKAGE_HTOP=y
|
||||
BR2_PACKAGE_CONNTRACK_TOOLS=y
|
||||
|
|
|
@ -2,7 +2,7 @@ menu "System tools"
|
|||
source "$BR2_EXTERNAL_MINIKUBE_PATH/package/runc-master/Config.in"
|
||||
source "$BR2_EXTERNAL_MINIKUBE_PATH/package/podman/Config.in"
|
||||
source "$BR2_EXTERNAL_MINIKUBE_PATH/package/varlink/Config.in"
|
||||
source "$BR2_EXTERNAL_MINIKUBE_PATH/package/conmon-master/Config.in"
|
||||
source "$BR2_EXTERNAL_MINIKUBE_PATH/package/conmon/Config.in"
|
||||
source "$BR2_EXTERNAL_MINIKUBE_PATH/package/crio-bin/Config.in"
|
||||
source "$BR2_EXTERNAL_MINIKUBE_PATH/package/crictl-bin/Config.in"
|
||||
source "$BR2_EXTERNAL_MINIKUBE_PATH/package/automount/Config.in"
|
||||
|
@ -13,4 +13,5 @@ menu "System tools"
|
|||
source "$BR2_EXTERNAL_MINIKUBE_PATH/package/gluster/Config.in"
|
||||
source "$BR2_EXTERNAL_MINIKUBE_PATH/package/vbox-guest/Config.in"
|
||||
source "$BR2_EXTERNAL_MINIKUBE_PATH/package/containerd-bin/Config.in"
|
||||
source "$BR2_EXTERNAL_MINIKUBE_PATH/package/falco-probe/Config.in"
|
||||
endmenu
|
||||
|
|
|
@ -1,25 +0,0 @@
|
|||
################################################################################
|
||||
#
|
||||
# conmon
|
||||
#
|
||||
################################################################################
|
||||
|
||||
# HEAD as of 2019-12-11, v2.0.6
|
||||
CONMON_MASTER_VERSION = 29c336700f2999acf9db07662b4a61355076e64a
|
||||
CONMON_MASTER_SITE = https://github.com/containers/conmon/archive
|
||||
CONMON_MASTER_SOURCE = $(CONMON_MASTER_VERSION).tar.gz
|
||||
CONMON_MASTER_LICENSE = Apache-2.0
|
||||
CONMON_MASTER_LICENSE_FILES = LICENSE
|
||||
|
||||
CONMON_MASTER_DEPENDENCIES = host-pkgconf
|
||||
|
||||
define CONMON_MASTER_BUILD_CMDS
|
||||
$(MAKE) $(TARGET_CONFIGURE_OPTS) -C $(@D) GIT_COMMIT=$(CONMON_MASTER_VERSION) PREFIX=/usr
|
||||
endef
|
||||
|
||||
define CONMON_MASTER_INSTALL_TARGET_CMDS
|
||||
$(INSTALL) -Dm755 $(@D)/bin/conmon $(TARGET_DIR)/usr/libexec/crio/conmon
|
||||
$(INSTALL) -Dm755 $(@D)/bin/conmon $(TARGET_DIR)/usr/libexec/podman/conmon
|
||||
endef
|
||||
|
||||
$(eval $(generic-package))
|
|
@ -1,5 +1,5 @@
|
|||
config BR2_PACKAGE_CONMON_MASTER
|
||||
bool "conmon-master"
|
||||
config BR2_PACKAGE_CONMON
|
||||
bool "conmon"
|
||||
depends on BR2_PACKAGE_HOST_GO_ARCH_SUPPORTS
|
||||
depends on BR2_PACKAGE_HOST_GO_CGO_LINKING_SUPPORTS
|
||||
depends on BR2_TOOLCHAIN_HAS_THREADS
|
|
@ -1,5 +1,8 @@
|
|||
# Locally computed
|
||||
sha256 4f978a59c6ee516f7e3febfb3b0360a17d1be2c283313e1aeb27adcb8c8f9166 dde3ccf93f01ce5a3e0f7a2c97053697cc3ed152.tar.gz
|
||||
sha256 75fad6e66b43c5039719edbd82ba072723aea6a9d4d8be4e7ac1c245a291ab1b 8455ce1ef385120deb827d0f0588c04357bad4c4.tar.gz
|
||||
sha256 32db4ea508a9ff0266b8695ae32604869994371a5a09436438c366a09ddcc1df v0.3.0.tar.gz
|
||||
sha256 6c9bf278ae6e125a39f1ae419e5bd314162a743f6587d70b1b6be095ac32b9af eb5fa88c26fde5ce1e3f8a1d2a8a9498b2d7dbe6.tar.gz
|
||||
sha256 50cc36636c32a343f4c9f5ab6b9f7f5edd5d6ef7c9c403793f799f6605597718 v2.0.3.tar.gz
|
||||
sha256 93f7c127cb536fc60f4c08291fd34e99e492fdc6a36e6b0ddad97d868ecf10f7 29c336700f2999acf9db07662b4a61355076e64a.tar.gz
|
||||
sha256 d82ad6c1e315f8310ed75fe6905f81dce61b61d55a156e9e04c9855e78e1e165 v2.0.6.tar.gz
|
|
@ -0,0 +1,25 @@
|
|||
################################################################################
|
||||
#
|
||||
# conmon
|
||||
#
|
||||
################################################################################
|
||||
|
||||
CONMON_VERSION = v2.0.6
|
||||
CONMON_COMMIT = 29c336700f2999acf9db07662b4a61355076e64a
|
||||
CONMON_SITE = https://github.com/containers/conmon/archive
|
||||
CONMON_SOURCE = $(CONMON_VERSION).tar.gz
|
||||
CONMON_LICENSE = Apache-2.0
|
||||
CONMON_LICENSE_FILES = LICENSE
|
||||
|
||||
CONMON_DEPENDENCIES = host-pkgconf
|
||||
|
||||
define CONMON_BUILD_CMDS
|
||||
$(MAKE) $(TARGET_CONFIGURE_OPTS) -C $(@D) GIT_COMMIT=$(CONMON_COMMIT) PREFIX=/usr
|
||||
endef
|
||||
|
||||
define CONMON_INSTALL_TARGET_CMDS
|
||||
$(INSTALL) -Dm755 $(@D)/bin/conmon $(TARGET_DIR)/usr/libexec/crio/conmon
|
||||
$(INSTALL) -Dm755 $(@D)/bin/conmon $(TARGET_DIR)/usr/libexec/podman/conmon
|
||||
endef
|
||||
|
||||
$(eval $(generic-package))
|
|
@ -66,4 +66,4 @@ oom_score = 0
|
|||
deletion_threshold = 0
|
||||
mutation_threshold = 100
|
||||
schedule_delay = "0s"
|
||||
startup_delay = "100ms"
|
||||
startup_delay = "100ms"
|
||||
|
|
|
@ -0,0 +1,77 @@
|
|||
root = "/var/lib/containerd"
|
||||
state = "/run/containerd"
|
||||
oom_score = 0
|
||||
|
||||
[grpc]
|
||||
address = "/run/containerd/containerd.sock"
|
||||
uid = 0
|
||||
gid = 0
|
||||
max_recv_message_size = 16777216
|
||||
max_send_message_size = 16777216
|
||||
|
||||
[debug]
|
||||
address = ""
|
||||
uid = 0
|
||||
gid = 0
|
||||
level = ""
|
||||
|
||||
[metrics]
|
||||
address = ""
|
||||
grpc_histogram = false
|
||||
|
||||
[cgroup]
|
||||
path = ""
|
||||
|
||||
[plugins]
|
||||
[plugins.cgroups]
|
||||
no_prometheus = false
|
||||
[plugins.cri]
|
||||
stream_server_address = "127.0.0.1"
|
||||
stream_server_port = "0"
|
||||
enable_selinux = false
|
||||
sandbox_image = "k8s.gcr.io/pause:3.1"
|
||||
stats_collect_period = 10
|
||||
systemd_cgroup = false
|
||||
enable_tls_streaming = false
|
||||
max_container_log_line_size = 16384
|
||||
disable_proc_mount = false
|
||||
[plugins.cri.containerd]
|
||||
snapshotter = "overlayfs"
|
||||
no_pivot = false
|
||||
[plugins.cri.containerd.default_runtime]
|
||||
runtime_type = "io.containerd.runtime.v1.linux"
|
||||
runtime_engine = ""
|
||||
runtime_root = ""
|
||||
[plugins.cri.containerd.untrusted_workload_runtime]
|
||||
runtime_type = ""
|
||||
runtime_engine = ""
|
||||
runtime_root = ""
|
||||
[plugins.cri.cni]
|
||||
bin_dir = "/opt/cni/bin"
|
||||
conf_dir = "/etc/cni/net.d"
|
||||
conf_template = ""
|
||||
[plugins.cri.registry]
|
||||
[plugins.cri.registry.mirrors]
|
||||
[plugins.cri.registry.mirrors."docker.io"]
|
||||
endpoint = ["https://registry-1.docker.io"]
|
||||
[plugins.cri.x509_key_pair_streaming]
|
||||
tls_cert_file = ""
|
||||
tls_key_file = ""
|
||||
[plugins.diff-service]
|
||||
default = ["walking"]
|
||||
[plugins.linux]
|
||||
shim = "containerd-shim"
|
||||
runtime = "runc"
|
||||
runtime_root = ""
|
||||
no_shim = false
|
||||
shim_debug = false
|
||||
[plugins.opt]
|
||||
path = "/opt/containerd"
|
||||
[plugins.restart]
|
||||
interval = "10s"
|
||||
[plugins.scheduler]
|
||||
pause_threshold = 0.02
|
||||
deletion_threshold = 0
|
||||
mutation_threshold = 100
|
||||
schedule_delay = "0s"
|
||||
startup_delay = "100ms"
|
|
@ -2,3 +2,4 @@ sha256 ccf83574556793ceb01717dc91c66b70f183c60c2bbec70283939aae8fdef768 crictl-
|
|||
sha256 9bdbea7a2b382494aff2ff014da328a042c5aba9096a7772e57fdf487e5a1d51 crictl-v1.13.0-linux-amd64.tar.gz
|
||||
sha256 c3b71be1f363e16078b51334967348aab4f72f46ef64a61fe7754e029779d45a crictl-v1.15.0-linux-amd64.tar.gz
|
||||
sha256 19fed421710fccfe58f5573383bb137c19438a9056355556f1a15da8d23b3ad1 crictl-v1.16.1-linux-amd64.tar.gz
|
||||
sha256 7b72073797f638f099ed19550d52e9b9067672523fc51b746e65d7aa0bafa414 crictl-v1.17.0-linux-amd64.tar.gz
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
#
|
||||
################################################################################
|
||||
|
||||
CRICTL_BIN_VERSION = v1.16.1
|
||||
CRICTL_BIN_VERSION = v1.17.0
|
||||
CRICTL_BIN_SITE = https://github.com/kubernetes-sigs/cri-tools/releases/download/$(CRICTL_BIN_VERSION)
|
||||
CRICTL_BIN_SOURCE = crictl-$(CRICTL_BIN_VERSION)-linux-amd64.tar.gz
|
||||
CRICTL_BIN_STRIP_COMPONENTS = 0
|
||||
|
|
|
@ -9,6 +9,7 @@ config BR2_PACKAGE_CRIO_BIN
|
|||
depends on !BR2_STATIC_LIBS # lvm2
|
||||
depends on !BR2_TOOLCHAIN_USES_MUSL # lvm2
|
||||
select BR2_PACKAGE_RUNC_MASTER
|
||||
select BR2_PACKAGE_CONMON
|
||||
select BR2_PACKAGE_BTRFS_PROGS
|
||||
select BR2_PACKAGE_LIBSECCOMP
|
||||
select BR2_PACKAGE_LIBGPGME
|
||||
|
|
|
@ -43,8 +43,8 @@ define CRIO_BIN_INSTALL_TARGET_CMDS
|
|||
$(@D)/bin/crio \
|
||||
$(TARGET_DIR)/usr/bin/crio
|
||||
$(INSTALL) -Dm755 \
|
||||
$(@D)/bin/pause \
|
||||
$(TARGET_DIR)/usr/libexec/crio/pause
|
||||
$(@D)/bin/pinns \
|
||||
$(TARGET_DIR)/usr/bin/pinns
|
||||
$(INSTALL) -Dm644 \
|
||||
$(CRIO_BIN_PKGDIR)/crio.conf \
|
||||
$(TARGET_DIR)/etc/crio/crio.conf
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
config BR2_PACKAGE_FALCO_PROBE
|
||||
bool "falco-probe"
|
||||
default y
|
||||
depends on BR2_LINUX_KERNEL
|
||||
select BR2_PACKAGE_NCURSES
|
||||
select BR2_PACKAGE_LIBYAML
|
|
@ -0,0 +1,4 @@
|
|||
# falco
|
||||
sha256 87c60273c35d544256e471b403497be33f24df662673338236ec92ba3fc1f8b7 0.19.0.tar.gz
|
||||
# sysdig
|
||||
sha256 6e477ac5fe9d3110b870bd4495f01541373a008c375a1934a2d1c46798b6bad6 146a431edf95829ac11bfd9c85ba3ef08789bffe.tar.gz
|
|
@ -0,0 +1,34 @@
|
|||
########################################################################
|
||||
#
|
||||
# Falco probe (driver) kernel module
|
||||
#
|
||||
########################################################################
|
||||
|
||||
FALCO_PROBE_VERSION = 0.19.0
|
||||
FALCO_PROBE_SITE = https://github.com/falcosecurity/falco/archive
|
||||
FALCO_PROBE_SOURCE = $(FALCO_PROBE_VERSION).tar.gz
|
||||
FALCO_PROBE_DEPENDENCIES += ncurses libyaml
|
||||
FALCO_PROBE_LICENSE = Apache-2.0
|
||||
FALCO_PROBE_LICENSE_FILES = COPYING
|
||||
|
||||
# see cmake/modules/sysdig-repo/CMakeLists.txt
|
||||
FALCO_PROBE_SYSDIG_VERSION = 146a431edf95829ac11bfd9c85ba3ef08789bffe
|
||||
FALCO_PROBE_EXTRA_DOWNLOADS = https://github.com/draios/sysdig/archive/${FALCO_PROBE_SYSDIG_VERSION}.tar.gz
|
||||
|
||||
define FALCO_PROBE_SYSDIG_SRC
|
||||
sed -e 's|URL ".*"|URL "'$(FALCO_PROBE_DL_DIR)/$(FALCO_PROBE_SYSDIG_VERSION).tar.gz'"|' -i $(@D)/cmake/modules/sysdig-repo/CMakeLists.txt
|
||||
endef
|
||||
|
||||
FALCO_PROBE_POST_EXTRACT_HOOKS += FALCO_PROBE_SYSDIG_SRC
|
||||
|
||||
FALCO_PROBE_CONF_OPTS = -DFALCO_VERSION=$(FALCO_PROBE_VERSION)
|
||||
FALCO_PROBE_CONF_OPTS += -DSYSDIG_VERSION=$(FALCO_PROBE_SYSDIG_VERSION)
|
||||
FALCO_PROBE_CONF_OPTS += -DUSE_BUNDLED_DEPS=ON
|
||||
|
||||
FALCO_PROBE_MAKE_OPTS = driver KERNELDIR=$(LINUX_DIR)
|
||||
FALCO_PROBE_INSTALL_OPTS = install_driver
|
||||
FALCO_PROBE_INSTALL_STAGING_OPTS = INSTALL_MOD_PATH=$(STAGING_DIR) install_driver
|
||||
FALCO_PROBE_INSTALL_TARGET_OPTS = INSTALL_MOD_PATH=$(TARGET_DIR) install_driver
|
||||
|
||||
$(eval $(kernel-module))
|
||||
$(eval $(cmake-package))
|
|
@ -4,7 +4,7 @@
|
|||
#
|
||||
################################################################################
|
||||
|
||||
HYPERV_DAEMONS_VERSION = 4.19.88
|
||||
HYPERV_DAEMONS_VERSION = $(call qstrip,$(BR2_LINUX_KERNEL_VERSION))
|
||||
HYPERV_DAEMONS_SITE = https://www.kernel.org/pub/linux/kernel/v4.x
|
||||
HYPERV_DAEMONS_SOURCE = linux-$(HYPERV_DAEMONS_VERSION).tar.xz
|
||||
|
||||
|
|
|
@ -5,6 +5,7 @@ config BR2_PACKAGE_PODMAN
|
|||
depends on BR2_PACKAGE_HOST_GO_ARCH_SUPPORTS
|
||||
depends on BR2_PACKAGE_HOST_GO_CGO_LINKING_SUPPORTS
|
||||
depends on BR2_TOOLCHAIN_HAS_THREADS
|
||||
select BR2_PACKAGE_CONMON_MASTER
|
||||
select BR2_PACKAGE_RUNC_MASTER
|
||||
select BR2_PACKAGE_CONMON
|
||||
select BR2_PACKAGE_LIBSECCOMP
|
||||
select BR2_PACKAGE_LIBGPGME
|
||||
|
|
|
@ -11,4 +11,4 @@ sha256 45eb7bccd81a1431b0c7a0697829c0bcc397048595d143fd91179b31d22a3c63 v1.4.1.t
|
|||
sha256 2e027c1b935f3a03f27ef7f17823ccf334607a17d033d4ce53a90b98294e7f68 v1.4.4.tar.gz
|
||||
sha256 61b44b739c485125f179044f7aa7dc58c820f771bce4ce495fa555a38dc68b57 v1.6.3.tar.gz
|
||||
sha256 6e59821320b435543bc7554e73faa66d5956e4ad3f7e7f4ea03bebd6726758e9 v1.6.4.tar.gz
|
||||
sha256 6e59821320b435543bc7554e73faa66d5956e4ad3f7e7f4ea03bebd6726758e9 v1.6.5.tar.gz
|
||||
sha256 50960293c2019e38ce69e4cf5f0a683e7fea1562b180e38e38c9355fcd7c4f0d v1.6.5.tar.gz
|
||||
|
|
|
@ -4,4 +4,12 @@ VARLINK_SOURCE = $(VARLINK_VERSION).tar.gz
|
|||
VARLINK_LICENSE = Apache-2.0
|
||||
VARLINK_LICENSE_FILES = LICENSE
|
||||
|
||||
VARLINK_NEEDS_HOST_PYTHON = python3
|
||||
|
||||
define VARLINK_ENV_PYTHON3
|
||||
sed -e 's|/usr/bin/python3|/usr/bin/env python3|' -i $(@D)/varlink-wrapper.py
|
||||
endef
|
||||
|
||||
VARLINK_POST_EXTRACT_HOOKS += VARLINK_ENV_PYTHON3
|
||||
|
||||
$(eval $(meson-package))
|
||||
|
|
|
@ -1,4 +1,12 @@
|
|||
[
|
||||
{
|
||||
"name": "v1.7.3",
|
||||
"checksums": {
|
||||
"darwin": "e8d762357123773f6c4dc300f8bccec3cdf2326c94f03a8aeb934e4e73fd59b8",
|
||||
"linux": "575adc22884b49ecce9c9d289a7127b64f2759f639cb894c3040890bee1939c5",
|
||||
"windows": "0fdc0d60e36001c021b6cc09e699e2a38b4070661cf7e78badf750ee84340afa"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "v1.7.2",
|
||||
"checksums": {
|
||||
|
|
56
go.mod
56
go.mod
|
@ -70,11 +70,11 @@ require (
|
|||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47
|
||||
golang.org/x/text v0.3.2
|
||||
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 // indirect
|
||||
k8s.io/api v0.17.2
|
||||
k8s.io/apimachinery v0.17.2
|
||||
k8s.io/client-go v11.0.0+incompatible
|
||||
k8s.io/api v0.17.3
|
||||
k8s.io/apimachinery v0.17.3
|
||||
k8s.io/client-go v0.17.3
|
||||
k8s.io/kubectl v0.0.0
|
||||
k8s.io/kubernetes v1.17.2
|
||||
k8s.io/kubernetes v1.17.3
|
||||
k8s.io/utils v0.0.0-20200122174043-1e243dd1a584 // indirect
|
||||
sigs.k8s.io/sig-storage-lib-external-provisioner v4.0.0+incompatible
|
||||
)
|
||||
|
@ -85,28 +85,28 @@ replace (
|
|||
github.com/docker/machine => github.com/machine-drivers/machine v0.7.1-0.20191109154235-b39d5b50de51
|
||||
github.com/hashicorp/go-getter => github.com/afbjorklund/go-getter v1.4.1-0.20190910175809-eb9f6c26742c
|
||||
github.com/samalba/dockerclient => github.com/sayboras/dockerclient v0.0.0-20191231050035-015626177a97
|
||||
k8s.io/api => k8s.io/api v0.17.2
|
||||
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.17.2
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.17.2
|
||||
k8s.io/apiserver => k8s.io/apiserver v0.17.2
|
||||
k8s.io/cli-runtime => k8s.io/cli-runtime v0.17.2
|
||||
k8s.io/client-go => k8s.io/client-go v0.17.2
|
||||
k8s.io/cloud-provider => k8s.io/cloud-provider v0.17.2
|
||||
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.17.2
|
||||
k8s.io/code-generator => k8s.io/code-generator v0.17.2
|
||||
k8s.io/component-base => k8s.io/component-base v0.17.2
|
||||
k8s.io/cri-api => k8s.io/cri-api v0.17.2
|
||||
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.17.2
|
||||
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.17.2
|
||||
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.17.2
|
||||
k8s.io/kube-proxy => k8s.io/kube-proxy v0.17.2
|
||||
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.17.2
|
||||
k8s.io/kubectl => k8s.io/kubectl v0.17.2
|
||||
k8s.io/kubelet => k8s.io/kubelet v0.17.2
|
||||
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.17.2
|
||||
k8s.io/metrics => k8s.io/metrics v0.17.2
|
||||
k8s.io/node-api => k8s.io/node-api v0.17.2
|
||||
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.17.2
|
||||
k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.17.2
|
||||
k8s.io/sample-controller => k8s.io/sample-controller v0.17.2
|
||||
k8s.io/api => k8s.io/api v0.17.3
|
||||
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.17.3
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.17.3
|
||||
k8s.io/apiserver => k8s.io/apiserver v0.17.3
|
||||
k8s.io/cli-runtime => k8s.io/cli-runtime v0.17.3
|
||||
k8s.io/client-go => k8s.io/client-go v0.17.3
|
||||
k8s.io/cloud-provider => k8s.io/cloud-provider v0.17.3
|
||||
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.17.3
|
||||
k8s.io/code-generator => k8s.io/code-generator v0.17.3
|
||||
k8s.io/component-base => k8s.io/component-base v0.17.3
|
||||
k8s.io/cri-api => k8s.io/cri-api v0.17.3
|
||||
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.17.3
|
||||
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.17.3
|
||||
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.17.3
|
||||
k8s.io/kube-proxy => k8s.io/kube-proxy v0.17.3
|
||||
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.17.3
|
||||
k8s.io/kubectl => k8s.io/kubectl v0.17.3
|
||||
k8s.io/kubelet => k8s.io/kubelet v0.17.3
|
||||
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.17.3
|
||||
k8s.io/metrics => k8s.io/metrics v0.17.3
|
||||
k8s.io/node-api => k8s.io/node-api v0.17.3
|
||||
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.17.3
|
||||
k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.17.3
|
||||
k8s.io/sample-controller => k8s.io/sample-controller v0.17.3
|
||||
)
|
||||
|
|
65
go.sum
65
go.sum
|
@ -938,6 +938,8 @@ gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
|||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
|
@ -949,24 +951,23 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
|
|||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
k8s.io/api v0.17.2 h1:NF1UFXcKN7/OOv1uxdRz3qfra8AHsPav5M93hlV9+Dc=
|
||||
k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4=
|
||||
k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs=
|
||||
k8s.io/apimachinery v0.17.2 h1:hwDQQFbdRlpnnsR64Asdi55GyCaIP/3WQpMmbNBeWr4=
|
||||
k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
|
||||
k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo=
|
||||
k8s.io/cli-runtime v0.17.2/go.mod h1:aa8t9ziyQdbkuizkNLAw3qe3srSyWh9zlSB7zTqRNPI=
|
||||
k8s.io/client-go v0.17.2 h1:ndIfkfXEGrNhLIgkr0+qhRguSD3u6DCmonepn1O6NYc=
|
||||
k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI=
|
||||
k8s.io/cloud-provider v0.17.2/go.mod h1:9rEcGqEsUFHxC83oMUGBcsXTBpRVNVPX/U+nyQJTvHU=
|
||||
k8s.io/cluster-bootstrap v0.17.2 h1:KVjK1WviylwbBwC+3L51xKmGN3A+WmzW8rhtcfWdUqQ=
|
||||
k8s.io/cluster-bootstrap v0.17.2/go.mod h1:qiazpAM05fjAc+PEkrY8HSUhKlJSMBuLnVUSO6nvZL4=
|
||||
k8s.io/code-generator v0.17.2 h1:pTwl3rLB1fUyxmvEzmVPMM0tBSdUehd7z+bDzpj4lPE=
|
||||
k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
|
||||
k8s.io/component-base v0.17.2 h1:0XHf+cerTvL9I5Xwn9v+0jmqzGAZI7zNydv4tL6Cw6A=
|
||||
k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs=
|
||||
k8s.io/cri-api v0.17.2/go.mod h1:BzAkbBHHp81d+aXzbiIcUbilLkbXa40B8mUHOk6EX3s=
|
||||
k8s.io/csi-translation-lib v0.17.2/go.mod h1:NrhnhXJg/V6cHRTdPbmxvBuV3rJSqXsdLBE5JSRzcVI=
|
||||
k8s.io/api v0.17.3 h1:XAm3PZp3wnEdzekNkcmj/9Y1zdmQYJ1I4GKSBBZ8aG0=
|
||||
k8s.io/api v0.17.3/go.mod h1:YZ0OTkuw7ipbe305fMpIdf3GLXZKRigjtZaV5gzC2J0=
|
||||
k8s.io/apiextensions-apiserver v0.17.3/go.mod h1:CJbCyMfkKftAd/X/V6OTHYhVn7zXnDdnkUjS1h0GTeY=
|
||||
k8s.io/apimachinery v0.17.3 h1:f+uZV6rm4/tHE7xXgLyToprg6xWairaClGVkm2t8omg=
|
||||
k8s.io/apimachinery v0.17.3/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g=
|
||||
k8s.io/apiserver v0.17.3/go.mod h1:iJtsPpu1ZpEnHaNawpSV0nYTGBhhX2dUlnn7/QS7QiY=
|
||||
k8s.io/cli-runtime v0.17.3/go.mod h1:X7idckYphH4SZflgNpOOViSxetiMj6xI0viMAjM81TA=
|
||||
k8s.io/client-go v0.17.3 h1:deUna1Ksx05XeESH6XGCyONNFfiQmDdqeqUvicvP6nU=
|
||||
k8s.io/client-go v0.17.3/go.mod h1:cLXlTMtWHkuK4tD360KpWz2gG2KtdWEr/OT02i3emRQ=
|
||||
k8s.io/cloud-provider v0.17.3/go.mod h1:JBkKSQpbcjcYGDqH5PbifFrcgQ/7WOXRswnfLVbXpI8=
|
||||
k8s.io/cluster-bootstrap v0.17.3 h1:J0fKY0kTtD9ZFaLHAoWXhjXlb4m1g9BsF2YNgMRJsxU=
|
||||
k8s.io/cluster-bootstrap v0.17.3/go.mod h1:ujIYnCKnxY/MecpgPx9WgiYCVCFvici6tVIfI2FiI1g=
|
||||
k8s.io/code-generator v0.17.3/go.mod h1:l8BLVwASXQZTo2xamW5mQNFCe1XPiAesVq7Y1t7PiQQ=
|
||||
k8s.io/component-base v0.17.3 h1:hQzTSshY14aLSR6WGIYvmw+w+u6V4d+iDR2iDGMrlUg=
|
||||
k8s.io/component-base v0.17.3/go.mod h1:GeQf4BrgelWm64PXkIXiPh/XS0hnO42d9gx9BtbZRp8=
|
||||
k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
|
||||
k8s.io/csi-translation-lib v0.17.3/go.mod h1:FBya8XvGIqDm2/3evLQNxaFXqv/C2UcZa5JgJt6/qqY=
|
||||
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM=
|
||||
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
|
@ -975,23 +976,23 @@ k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUc
|
|||
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
|
||||
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||
k8s.io/kube-aggregator v0.17.2/go.mod h1:8xQTzaH0GrcKPiSB4YYWwWbeQ0j/4zRsbQt8usEMbRg=
|
||||
k8s.io/kube-controller-manager v0.17.2/go.mod h1:xznSbCHdVODF5StxiBMh3s6HenyCBdsedazlsh6/J3M=
|
||||
k8s.io/kube-aggregator v0.17.3/go.mod h1:1dMwMFQbmH76RKF0614L7dNenMl3dwnUJuOOyZ3GMXA=
|
||||
k8s.io/kube-controller-manager v0.17.3/go.mod h1:22B/TsgVviuCVuNwUrqgyTi5D4AYjMFaK9c8h1oonkY=
|
||||
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU=
|
||||
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
|
||||
k8s.io/kube-proxy v0.17.2 h1:DuZwJ9bdsANZ9DEa5dBkPU9KpR0HU4wFkDOAWQvOpIs=
|
||||
k8s.io/kube-proxy v0.17.2/go.mod h1:PVY+Cqds8qa/TLEqiSgDPgwWBiRHYjeS4kvp/C5dYjc=
|
||||
k8s.io/kube-scheduler v0.17.2/go.mod h1:BlP/p3YDLgsEIshEj4gbGjV11j4BQjNx7vbwRcLGnI8=
|
||||
k8s.io/kubectl v0.17.2 h1:QZR8Q6lWiVRjwKslekdbN5WPMp53dS/17j5e+oi5XVU=
|
||||
k8s.io/kubectl v0.17.2/go.mod h1:y4rfLV0n6aPmvbRCqZQjvOp3ezxsFgpqL+zF5jH/lxk=
|
||||
k8s.io/kubelet v0.17.2 h1:AggINdoeh58BjlM8oWy9XPLcWde8MIs44+/HJpyfPi0=
|
||||
k8s.io/kubelet v0.17.2/go.mod h1:XUOu5Fcnkx44FP13w5etBrn2GhK4D02CUcFA8tLtUKU=
|
||||
k8s.io/kubernetes v1.17.2 h1:g1UFZqFQsYx88xMUks4PKC6tsNcekxe0v06fcVGRwVE=
|
||||
k8s.io/kubernetes v1.17.2/go.mod h1:NbNV+69yL3eKiKDJ+ZEjqOplN3BFXKBeunzkoOy8WLo=
|
||||
k8s.io/legacy-cloud-providers v0.17.2/go.mod h1:a/qbE67VbTzWOemWfqH0wlcX31zxt4UOxFqZuBltY9Q=
|
||||
k8s.io/metrics v0.17.2/go.mod h1:3TkNHET4ROd+NfzNxkjoVfQ0Ob4iZnaHmSEA4vYpwLw=
|
||||
k8s.io/kube-proxy v0.17.3 h1:1sbA5umSKcTkkRo9k0igoEkearZ8bFnzGnzDlbhDkSo=
|
||||
k8s.io/kube-proxy v0.17.3/go.mod h1:ds8R8bUYPWtQlspC47Sff7o5aQhWDsv6jpQJATDuqaQ=
|
||||
k8s.io/kube-scheduler v0.17.3/go.mod h1:36HgrrPqzK+rOLTRtDG//b89KjrAZqFI4PXOpdH351M=
|
||||
k8s.io/kubectl v0.17.3 h1:9HHYj07kuFkM+sMJMOyQX29CKWq4lvKAG1UIPxNPMQ4=
|
||||
k8s.io/kubectl v0.17.3/go.mod h1:NUn4IBY7f7yCMwSop2HCXlw/MVYP4HJBiUmOR3n9w28=
|
||||
k8s.io/kubelet v0.17.3 h1:e6oo26NusXf6i7Pp6i5tWXT+Z/oQW0XzxiWxa5pBgw0=
|
||||
k8s.io/kubelet v0.17.3/go.mod h1:Nh8owUHZcUXtnDAtmGnip36Nw+X6c4rbmDQlVyIhwMQ=
|
||||
k8s.io/kubernetes v1.17.3 h1:zWCppkLfHM+hoLqfbsrQ0cJnYw+4vAvedI92oQnjo/Q=
|
||||
k8s.io/kubernetes v1.17.3/go.mod h1:gt28rfzaskIzJ8d82TSJmGrJ0XZD0BBy8TcQvTuCI3w=
|
||||
k8s.io/legacy-cloud-providers v0.17.3/go.mod h1:ujZML5v8efVQxiXXTG+nck7SjP8KhMRjUYNIsoSkYI0=
|
||||
k8s.io/metrics v0.17.3/go.mod h1:HEJGy1fhHOjHggW9rMDBJBD3YuGroH3Y1pnIRw9FFaI=
|
||||
k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8=
|
||||
k8s.io/sample-apiserver v0.17.2/go.mod h1:JLhi1DSBMlKAckfVdV1YNuz49EKNlfSflW+6LVDf1Mo=
|
||||
k8s.io/sample-apiserver v0.17.3/go.mod h1:cn/rvFIttGNqy1v88B5ZlDAbyyqDOoF7JHSwPiqNCNQ=
|
||||
k8s.io/system-validators v1.0.4/go.mod h1:HgSgTg4NAGNoYYjKsUyk52gdNi2PVDswQ9Iyn66R7NI=
|
||||
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo=
|
||||
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
|
|
|
@ -1,18 +1,27 @@
|
|||
ARG COMMIT_SHA
|
||||
# using base image created by kind https://github.com/kubernetes-sigs/kind
|
||||
# using base image created by kind https://github.com/kubernetes-sigs/kind/blob/master/images/base/Dockerfile
|
||||
# which is an ubuntu 19.10 with an entry-point that helps running systemd
|
||||
# could be changed to any debian that can run systemd
|
||||
FROM kindest/base:v20200122-2dfe64b2 as base
|
||||
USER root
|
||||
# specify version of everything explicitly using 'apt-cache policy'
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
sudo \
|
||||
dnsutils \
|
||||
openssh-server \
|
||||
docker.io \
|
||||
&& apt-get clean -y
|
||||
# disable containerd by default
|
||||
RUN systemctl disable containerd
|
||||
RUN rm /etc/crictl.yaml
|
||||
lz4=1.9.1-1 \
|
||||
gnupg=2.2.12-1ubuntu3 \
|
||||
sudo=1.8.27-1ubuntu4.1 \
|
||||
docker.io=19.03.2-0ubuntu1 \
|
||||
openssh-server=1:8.0p1-6build1 \
|
||||
dnsutils=1:9.11.5.P4+dfsg-5.1ubuntu2.1 \
|
||||
&& rm /etc/crictl.yaml
|
||||
# install cri-o based on https://github.com/cri-o/cri-o/commit/96b0c34b31a9fc181e46d7d8e34fb8ee6c4dc4e1#diff-04c6e90faac2675aa89e2176d2eec7d8R128
|
||||
RUN sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_19.10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list" && \
|
||||
curl -LO https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/xUbuntu_19.10/Release.key && \
|
||||
apt-key add - < Release.key && apt-get update && \
|
||||
apt-get install -y --no-install-recommends cri-o-1.17=1.17.0-3
|
||||
# install podman
|
||||
RUN apt-get install -y --no-install-recommends podman=1.8.0~7
|
||||
# disable non-docker runtimes by default
|
||||
RUN systemctl disable containerd && systemctl disable crio && rm /etc/crictl.yaml
|
||||
# enable docker which is default
|
||||
RUN systemctl enable docker
|
||||
# making SSH work for docker container
|
||||
|
@ -22,18 +31,18 @@ RUN echo 'root:root' |chpasswd
|
|||
RUN sed -ri 's/^#?PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config
|
||||
RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config
|
||||
EXPOSE 22
|
||||
# for minikube ssh. to match VM using "docker" as username
|
||||
# create docker user for minikube ssh. to match VM using "docker" as username
|
||||
RUN adduser --ingroup docker --disabled-password --gecos '' docker
|
||||
RUN adduser docker sudo
|
||||
RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
|
||||
USER docker
|
||||
RUN mkdir /home/docker/.ssh
|
||||
# Deleting leftovers
|
||||
USER root
|
||||
# kind base-image entry-point expects a "kind" folder for product_name,product_uuid
|
||||
# https://github.com/kubernetes-sigs/kind/blob/master/images/base/files/usr/local/bin/entrypoint
|
||||
RUN mkdir -p /kind
|
||||
RUN rm -rf \
|
||||
# Deleting leftovers
|
||||
RUN apt-get clean -y && rm -rf \
|
||||
/var/cache/debconf/* \
|
||||
/var/lib/apt/lists/* \
|
||||
/var/log/* \
|
||||
|
|
|
@ -33,6 +33,9 @@ export PATH=$PATH:"/usr/local/bin/:/usr/local/go/bin/:$GOPATH/bin"
|
|||
# installing golang so we could do go get for gopogh
|
||||
sudo ./installers/check_install_golang.sh "1.13.4" "/usr/local" || true
|
||||
|
||||
docker rm -f $(docker ps -aq) >/dev/null 2>&1 || true
|
||||
docker volume prune -f || true
|
||||
docker system df || true
|
||||
|
||||
echo ">> Starting at $(date)"
|
||||
echo ""
|
||||
|
@ -324,17 +327,33 @@ touch "${JSON_OUT}"
|
|||
# Generate JSON output
|
||||
echo ">> Running go test2json"
|
||||
go tool test2json -t < "${TEST_OUT}" > "${JSON_OUT}" || true
|
||||
|
||||
if ! type "jq" > /dev/null; then
|
||||
echo ">> Installing jq"
|
||||
if [ "$(uname)" != "Darwin" ]; then
|
||||
curl -LO https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 && sudo install jq-linux64 /usr/local/bin/jq
|
||||
else
|
||||
curl -LO https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 && sudo install jq-osx-amd64 /usr/local/bin/jq
|
||||
fi
|
||||
fi
|
||||
|
||||
echo ">> Installing gopogh"
|
||||
cd $(mktemp -d)
|
||||
GO111MODULE="on" go get -u github.com/medyagh/gopogh@v0.0.17 || true
|
||||
cd -
|
||||
if [ "$(uname)" != "Darwin" ]; then
|
||||
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh-linux-amd64 && sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
|
||||
else
|
||||
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh-darwin-amd64 && sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh
|
||||
fi
|
||||
|
||||
echo ">> Running gopogh"
|
||||
if test -f "${HTML_OUT}"; then
|
||||
rm "${HTML_OUT}" || true # clean up previous runs of same build
|
||||
fi
|
||||
|
||||
touch "${HTML_OUT}"
|
||||
pessimistic_status=$(gopogh -in "${JSON_OUT}" -out "${HTML_OUT}" -name "${JOB_NAME}" -pr "${MINIKUBE_LOCATION}" -repo github.com/kubernetes/minikube/ -details "${COMMIT}") || true
|
||||
gopogh_status=$(gopogh -in "${JSON_OUT}" -out "${HTML_OUT}" -name "${JOB_NAME}" -pr "${MINIKUBE_LOCATION}" -repo github.com/kubernetes/minikube/ -details "${COMMIT}") || true
|
||||
fail_num=$(echo $gopogh_status | jq '.NumberOfFail')
|
||||
test_num=$(echo $gopogh_status | jq '.NumberOfTests')
|
||||
pessimistic_status="$completed with ${fail_num} / ${test_num} failures in ${elapsed}"
|
||||
description="completed with ${status} in ${elapsed} minute(s)."
|
||||
if [ "$status" = "failure" ]; then
|
||||
description="completed with ${pessimistic_status} in ${elapsed} minute(s)."
|
||||
|
@ -400,11 +419,10 @@ function retry_github_status() {
|
|||
echo "HTTP code ${code}! Retrying in ${timeout} .."
|
||||
sleep "${timeout}"
|
||||
attempt=$(( attempt + 1 ))
|
||||
timeout=$(( timeout * 2 ))
|
||||
timeout=$(( timeout * 5 ))
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
|
||||
retry_github_status "${COMMIT}" "${JOB_NAME}" "${status}" "${access_token}" "${public_log_url}" "${description}"
|
||||
exit $result
|
||||
|
|
|
@ -46,7 +46,7 @@ sudo rm -rf /data/*
|
|||
sudo rm -rf /etc/kubernetes/*
|
||||
sudo rm -rf /var/lib/minikube/*
|
||||
|
||||
# Stop any leftover kubelets
|
||||
# Stop any leftover kubelet
|
||||
systemctl is-active --quiet kubelet \
|
||||
&& echo "stopping kubelet" \
|
||||
&& sudo systemctl stop kubelet
|
||||
|
|
|
@ -74,7 +74,7 @@ function retry_github_status() {
|
|||
echo "HTTP code ${code}! Retrying in ${timeout} .."
|
||||
sleep "${timeout}"
|
||||
attempt=$(( attempt + 1 ))
|
||||
timeout=$(( timeout * 2 ))
|
||||
timeout=$(( timeout * 5 ))
|
||||
done
|
||||
}
|
||||
|
||||
|
|
|
@ -65,7 +65,7 @@ func (d *Driver) Create() error {
|
|||
params := oci.CreateParams{
|
||||
Name: d.NodeConfig.MachineName,
|
||||
Image: d.NodeConfig.ImageDigest,
|
||||
ClusterLabel: oci.ClusterLabelKey + "=" + d.MachineName,
|
||||
ClusterLabel: oci.ProfileLabelKey + "=" + d.MachineName,
|
||||
CPUs: strconv.Itoa(d.NodeConfig.CPU),
|
||||
Memory: strconv.Itoa(d.NodeConfig.Memory) + "mb",
|
||||
Envs: d.NodeConfig.Envs,
|
||||
|
|
|
@ -0,0 +1,79 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package oci
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// RoutableHostIPFromInside returns the ip/dns of the host that the container lives on
// and that is routable from inside the container
|
||||
func RoutableHostIPFromInside(ociBin string, containerName string) (net.IP, error) {
|
||||
if ociBin != Docker {
|
||||
return nil, fmt.Errorf("RoutableHostIPFromInside is currently only implemented for docker https://github.com/containers/libpod/issues/5205")
|
||||
}
|
||||
if runtime.GOOS == "linux" {
|
||||
return dockerGatewayIP()
|
||||
}
|
||||
// for windows and mac, the gateway ip is not routable so we use dns trick.
|
||||
return digDNS(ociBin, containerName, "host.docker.internal")
|
||||
}
|
||||
|
||||
// digDNS will get the IP record for a dns
|
||||
func digDNS(ociBin, containerName, dns string) (net.IP, error) {
|
||||
if err := PointToHostDockerDaemon(); err != nil {
|
||||
return nil, errors.Wrap(err, "point host docker-daemon")
|
||||
}
|
||||
cmd := exec.Command(ociBin, "exec", "-t", containerName, "dig", "+short", dns)
|
||||
out, err := cmd.CombinedOutput()
|
||||
ip := net.ParseIP(strings.TrimSpace(string(out)))
|
||||
if err != nil {
|
||||
return ip, errors.Wrapf(err, "resolve dns to ip: %s", string(out))
|
||||
}
|
||||
glog.Infof("got host ip for mount in container by digging dns: %s", ip.String())
|
||||
return ip, nil
|
||||
}
|
||||
|
||||
// dockerGatewayIP gets the default gateway ip for the docker bridge on the user's host machine
|
||||
// gets the ip from user's host docker
|
||||
func dockerGatewayIP() (net.IP, error) {
|
||||
if err := PointToHostDockerDaemon(); err != nil {
|
||||
return nil, errors.Wrap(err, "point host docker-daemon")
|
||||
}
|
||||
cmd := exec.Command(Docker, "network", "ls", "--filter", "name=bridge", "--format", "{{.ID}}")
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "get network bridge. output: %s", string(out))
|
||||
}
|
||||
bridgeID := strings.TrimSpace(string(out))
|
||||
cmd = exec.Command(Docker, "inspect",
|
||||
"--format", "{{(index .IPAM.Config 0).Gateway}}", bridgeID)
|
||||
out, err = cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "inspect IP gatway for bridge network: %q. output: %s", string(out), bridgeID)
|
||||
}
|
||||
ip := net.ParseIP(strings.TrimSpace(string(out)))
|
||||
glog.Infof("got host ip for mount in container by inspect docker network: %s", ip.String())
|
||||
return ip, nil
|
||||
}
|
|
@ -34,6 +34,31 @@ import (
|
|||
"strings"
|
||||
)
|
||||
|
||||
// DeleteAllContainersByLabel deletes all containers that have a specific label
|
||||
// if there no containers found with the given label, it will return nil
|
||||
func DeleteAllContainersByLabel(ociBin string, label string) []error {
|
||||
var deleteErrs []error
|
||||
if ociBin == Docker {
|
||||
if err := PointToHostDockerDaemon(); err != nil {
|
||||
return []error{errors.Wrap(err, "point host docker-daemon")}
|
||||
}
|
||||
}
|
||||
cs, err := listContainersByLabel(ociBin, label)
|
||||
if err != nil {
|
||||
return []error{fmt.Errorf("listing containers by label %q", label)}
|
||||
}
|
||||
if len(cs) == 0 {
|
||||
return nil
|
||||
}
|
||||
for _, c := range cs {
|
||||
cmd := exec.Command(ociBin, "rm", "-f", "-v", c)
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
deleteErrs = append(deleteErrs, errors.Wrapf(err, "delete container %s: output %s", c, out))
|
||||
}
|
||||
}
|
||||
return deleteErrs
|
||||
}
|
||||
|
||||
// CreateContainerNode creates a new container node
|
||||
func CreateContainerNode(p CreateParams) error {
|
||||
if err := PointToHostDockerDaemon(); err != nil {
|
||||
|
@ -57,24 +82,28 @@ func CreateContainerNode(p CreateParams) error {
|
|||
"-v", "/lib/modules:/lib/modules:ro",
|
||||
"--hostname", p.Name, // make hostname match container name
|
||||
"--name", p.Name, // ... and set the container name
|
||||
"--label", fmt.Sprintf("%s=%s", CreatedByLabelKey, "true"),
|
||||
// label the node with the cluster ID
|
||||
"--label", p.ClusterLabel,
|
||||
// label the node with the role ID
|
||||
"--label", fmt.Sprintf("%s=%s", nodeRoleKey, p.Role),
|
||||
}
|
||||
|
||||
// volume path in minikube home folder to mount to /var
|
||||
hostVarVolPath := filepath.Join(localpath.MiniPath(), "machines", p.Name, "var")
|
||||
if err := os.MkdirAll(hostVarVolPath, 0711); err != nil {
|
||||
return errors.Wrapf(err, "create var dir %s", hostVarVolPath)
|
||||
"--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, p.Role),
|
||||
}
|
||||
|
||||
if p.OCIBinary == Podman { // enable execing in /var
|
||||
// volume path in minikube home folder to mount to /var
|
||||
hostVarVolPath := filepath.Join(localpath.MiniPath(), "machines", p.Name, "var")
|
||||
if err := os.MkdirAll(hostVarVolPath, 0711); err != nil {
|
||||
return errors.Wrapf(err, "create var dir %s", hostVarVolPath)
|
||||
}
|
||||
// podman mounts var/lib with no-exec by default https://github.com/containers/libpod/issues/5103
|
||||
runArgs = append(runArgs, "--volume", fmt.Sprintf("%s:/var:exec", hostVarVolPath))
|
||||
}
|
||||
if p.OCIBinary == Docker {
|
||||
runArgs = append(runArgs, "--volume", "/var")
|
||||
if err := createDockerVolume(p.Name); err != nil {
|
||||
return errors.Wrapf(err, "creating volume for %s container", p.Name)
|
||||
}
|
||||
glog.Infof("Successfully created a docker volume %s", p.Name)
|
||||
runArgs = append(runArgs, "--volume", fmt.Sprintf("%s:/var", p.Name))
|
||||
// setting resource limit in privileged mode is only supported by docker
|
||||
// podman error: "Error: invalid configuration, cannot set resources with rootless containers not using cgroups v2 unified mode"
|
||||
runArgs = append(runArgs, fmt.Sprintf("--cpus=%s", p.CPUs), fmt.Sprintf("--memory=%s", p.Memory))
|
||||
|
@ -264,7 +293,7 @@ func ContainerID(ociBinary string, nameOrID string) (string, error) {
|
|||
|
||||
// ListOwnedContainers lists all the containers that the kic driver created on the user's machine using a label
|
||||
func ListOwnedContainers(ociBinary string) ([]string, error) {
|
||||
return listContainersByLabel(ociBinary, ClusterLabelKey)
|
||||
return listContainersByLabel(ociBinary, ProfileLabelKey)
|
||||
}
|
||||
|
||||
// inspect return low-level information on containers
|
||||
|
@ -395,23 +424,23 @@ func withPortMappings(portMappings []PortMapping) createOpt {
|
|||
}
|
||||
}
|
||||
|
||||
// listContainersByLabel lists all the containres that kic driver created on user's machine using a label
|
||||
// io.x-k8s.kic.cluster
|
||||
// listContainersByLabel returns all the container names with a specified label
|
||||
func listContainersByLabel(ociBinary string, label string) ([]string, error) {
|
||||
if err := PointToHostDockerDaemon(); err != nil {
|
||||
return nil, errors.Wrap(err, "point host docker-daemon")
|
||||
}
|
||||
cmd := exec.Command(ociBinary, "ps", "-a", "--filter", fmt.Sprintf("label=%s", label), "--format", "{{.Names}}")
|
||||
var b bytes.Buffer
|
||||
cmd.Stdout = &b
|
||||
cmd.Stderr = &b
|
||||
err := cmd.Run()
|
||||
var lines []string
|
||||
sc := bufio.NewScanner(&b)
|
||||
for sc.Scan() {
|
||||
lines = append(lines, sc.Text())
|
||||
stdout, err := cmd.Output()
|
||||
s := bufio.NewScanner(bytes.NewReader(stdout))
|
||||
var names []string
|
||||
for s.Scan() {
|
||||
n := strings.TrimSpace(s.Text())
|
||||
if n != "" {
|
||||
names = append(names, n)
|
||||
}
|
||||
}
|
||||
return lines, err
|
||||
|
||||
return names, err
|
||||
}
|
||||
|
||||
// PointToHostDockerDaemon will unset env variables that point to docker inside minikube
|
||||
|
|
|
@ -19,14 +19,19 @@ package oci
|
|||
const (
|
||||
// DefaultBindIPV4 is The default IP the container will listen on.
|
||||
DefaultBindIPV4 = "127.0.0.1"
|
||||
Docker = "docker"
|
||||
Podman = "podman"
|
||||
// ClusterLabelKey is applied to each node docker container for identification
|
||||
ClusterLabelKey = "io.x-k8s.kic.cluster"
|
||||
// Docker is docker
|
||||
Docker = "docker"
|
||||
// Podman is podman
|
||||
Podman = "podman"
|
||||
// ProfileLabelKey is applied to any container or volume created by a specific minikube profile name.minikube.sigs.k8s.io=PROFILE_NAME
|
||||
ProfileLabelKey = "name.minikube.sigs.k8s.io"
|
||||
// NodeRoleKey is used to identify if it is control plane or worker
|
||||
nodeRoleKey = "io.k8s.sigs.kic.role"
|
||||
nodeRoleLabelKey = "role.minikube.sigs.k8s.io"
|
||||
// CreatedByLabelKey is applied to any container/volume that is created by minikube created_by.minikube.sigs.k8s.io=true
|
||||
CreatedByLabelKey = "created_by.minikube.sigs.k8s.io"
|
||||
)
|
||||
|
||||
// CreateParams are parameters needed to create a container
|
||||
type CreateParams struct {
|
||||
Name string // used for container name and hostname
|
||||
Image string // container image to use to create the node.
@@ -0,0 +1,103 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package oci

import (
	"bufio"
	"bytes"
	"fmt"
	"os/exec"
	"strings"

	"github.com/golang/glog"
	"github.com/pkg/errors"
)

// DeleteAllVolumesByLabel deletes all volumes that have a specific label
// if there is no volume to delete it will return nil
func DeleteAllVolumesByLabel(ociBin string, label string) []error {
	var deleteErrs []error
	glog.Infof("trying to delete all %s volumes with label %s", ociBin, label)
	if ociBin == Docker {
		if err := PointToHostDockerDaemon(); err != nil {
			return []error{errors.Wrap(err, "point host docker-daemon")}
		}
	}

	vs, err := allVolumesByLabel(ociBin, label)
	if err != nil {
		return []error{fmt.Errorf("listing volumes by label %q: %v", label, err)}
	}

	for _, v := range vs {
		cmd := exec.Command(ociBin, "volume", "rm", "--force", v)
		if out, err := cmd.CombinedOutput(); err != nil {
			deleteErrs = append(deleteErrs, fmt.Errorf("deleting volume %s: output: %s", v, string(out)))
		}
	}
	return deleteErrs
}

// PruneAllVolumesByLabel deletes all volumes that have a specific label
// if there is no volume to delete it will return nil
// example: docker volume prune -f --filter label=name.minikube.sigs.k8s.io=minikube
func PruneAllVolumesByLabel(ociBin string, label string) []error {
	var deleteErrs []error
	glog.Infof("trying to prune all %s volumes with label %s", ociBin, label)
	if ociBin == Docker {
		if err := PointToHostDockerDaemon(); err != nil {
			return []error{errors.Wrap(err, "point host docker-daemon")}
		}
	}

	// try to prune afterwards just in case delete didn't go through
	cmd := exec.Command(ociBin, "volume", "prune", "-f", "--filter", "label="+label)
	if out, err := cmd.CombinedOutput(); err != nil {
		deleteErrs = append(deleteErrs, errors.Wrapf(err, "prune volume by label %s: %s", label, string(out)))
	}
	return deleteErrs
}

// allVolumesByLabel returns name of all docker volumes by a specific label
// will not return error if there is no volume found.
func allVolumesByLabel(ociBin string, label string) ([]string, error) {
	cmd := exec.Command(ociBin, "volume", "ls", "--filter", "label="+label, "--format", "{{.Name}}")
	stdout, err := cmd.Output()
	s := bufio.NewScanner(bytes.NewReader(stdout))
	var vols []string
	for s.Scan() {
		v := strings.TrimSpace(s.Text())
		if v != "" {
			vols = append(vols, v)
		}
	}
	return vols, err
}

// createDockerVolume creates a docker volume to be attached to the container with correct labels and prefixes based on profile name
// Caution ! if volume already exists does NOT return an error and will not apply the minikube labels on it.
// TODO: this should be fixed as a part of https://github.com/kubernetes/minikube/issues/6530
func createDockerVolume(name string) error {
	if err := PointToHostDockerDaemon(); err != nil {
		return errors.Wrap(err, "point host docker-daemon")
	}
	cmd := exec.Command(Docker, "volume", "create", name, "--label", fmt.Sprintf("%s=%s", ProfileLabelKey, name), "--label", fmt.Sprintf("%s=%s", CreatedByLabelKey, "true"))
	if out, err := cmd.CombinedOutput(); err != nil {
		return errors.Wrapf(err, "output %s", string(out))
	}
	return nil
}
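A hedged usage sketch (not part of this diff): a caller cleaning up a profile could pair DeleteAllVolumesByLabel with PruneAllVolumesByLabel, mirroring the "try to prune afterwards" comment above. The profile name and the calling package are assumed for illustration.

package main

import (
	"fmt"

	"k8s.io/minikube/pkg/drivers/kic/oci"
)

func main() {
	// assumed profile name; the label key comes from the oci constants shown earlier
	label := oci.ProfileLabelKey + "=minikube"
	if errs := oci.DeleteAllVolumesByLabel(oci.Docker, label); len(errs) > 0 {
		fmt.Println("delete errors:", errs)
	}
	// prune as a fallback in case some deletes did not go through
	if errs := oci.PruneAllVolumesByLabel(oci.Docker, label); len(errs) > 0 {
		fmt.Println("prune errors:", errs)
	}
}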
@@ -16,21 +16,34 @@ limitations under the License.

package kic

import "k8s.io/minikube/pkg/drivers/kic/oci"
import (
	"fmt"

	"k8s.io/minikube/pkg/drivers/kic/oci"
)

const (
	// Docker default bridge network is named "bridge" (https://docs.docker.com/network/bridge/#use-the-default-bridge-network)
	// DefaultNetwork is the Docker default bridge network named "bridge"
	// (https://docs.docker.com/network/bridge/#use-the-default-bridge-network)
	DefaultNetwork = "bridge"
	// DefaultPodCIDR is The CIDR to be used for pods inside the node.
	DefaultPodCIDR = "10.244.0.0/16"

	// BaseImage is the base image is used to spin up kic containers. it uses same base-image as kind.
	BaseImage = "gcr.io/k8s-minikube/kicbase:v0.0.5@sha256:3ddd8461dfb5c3e452ccc44d87750b87a574ec23fc425da67dccc1f0c57d428a"
	// Version is the current version of kic
	Version = "v0.0.7"
	// SHA of the kic base image
	baseImageSHA = "a6f288de0e5863cdeab711fa6bafa38ee7d8d285ca14216ecf84fcfb07c7d176"

	// OverlayImage is the cni plugin used for overlay image, created by kind.
	// CNI plugin image used for kic drivers created by kind.
	OverlayImage = "kindest/kindnetd:0.5.3"
)

var (
	// BaseImage is the base image is used to spin up kic containers. it uses same base-image as kind.
	BaseImage = fmt.Sprintf("gcr.io/k8s-minikube/kicbase:%s@sha256:%s", Version, baseImageSHA)
)

// Config is configuration for the kic driver used by registry
type Config struct {
	MachineName string // maps to the container name being created
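Worked example of the new BaseImage expression: with Version = "v0.0.7" and the baseImageSHA above, fmt.Sprintf produces gcr.io/k8s-minikube/kicbase:v0.0.7@sha256:a6f288de0e5863cdeab711fa6bafa38ee7d8d285ca14216ecf84fcfb07c7d176, so bumping either constant updates the pulled image reference in one place instead of editing a hard-coded string like the old v0.0.5 value.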
@@ -40,7 +40,7 @@
	containerdConfigTomlPath = "/etc/containerd/config.toml"
	storedContainerdConfigTomlPath = "/tmp/config.toml"
	gvisorContainerdShimURL = "https://github.com/google/gvisor-containerd-shim/releases/download/v0.0.3/containerd-shim-runsc-v1.linux-amd64"
	gvisorURL = "https://storage.googleapis.com/gvisor/releases/nightly/2019-11-27/runsc"
	gvisorURL = "https://storage.googleapis.com/gvisor/releases/nightly/2020-02-14/runsc"
)

// Enable follows these steps for enabling gvisor in minikube:
@@ -28,8 +28,12 @@
	"k8s.io/minikube/pkg/minikube/config"
)

// enum to differentiate kubeadm command line parameters from kubeadm config file parameters (see the
// KubeadmExtraArgsWhitelist variable for more info)
const (
	KubeadmCmdParam = iota
	// KubeadmCmdParam is command parameters for kubeadm
	KubeadmCmdParam = iota
	// KubeadmConfigParam is config parameters for kubeadm
	KubeadmConfigParam = iota
)
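Illustration only (the dispatch logic below is assumed, not taken from this change): the two values let a caller decide whether an extra argument should be rendered as a kubeadm command-line flag or as an entry in the generated config file, for example:

package main

import "fmt"

// local mirror of the enum above (values 0 and 1)
const (
	KubeadmCmdParam = iota
	KubeadmConfigParam
)

// formatExtraArg is a hypothetical helper, shown only to illustrate the distinction
func formatExtraArg(kind int, name, value string) string {
	switch kind {
	case KubeadmCmdParam:
		return fmt.Sprintf("--%s=%s", name, value) // flag on the kubeadm command line
	case KubeadmConfigParam:
		return fmt.Sprintf("%s: %q", name, value) // entry in the kubeadm config file
	default:
		return ""
	}
}

func main() {
	fmt.Println(formatExtraArg(KubeadmCmdParam, "ignore-preflight-errors", "all"))
	fmt.Println(formatExtraArg(KubeadmConfigParam, "authorization-mode", "Webhook"))
}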
@@ -29,10 +29,11 @@
var KubeadmYamlPath = path.Join(vmpath.GuestEphemeralDir, "kubeadm.yaml")

const (
	//DefaultCNIConfigPath is the configuration file for CNI networks
	DefaultCNIConfigPath = "/etc/cni/net.d/k8s.conf"
	KubeletServiceFile = "/lib/systemd/system/kubelet.service"
	// enum to differentiate kubeadm command line parameters from kubeadm config file parameters (see the
	// KubeadmExtraArgsWhitelist variable for more info)
	// KubeletServiceFile is the file for the systemd kubelet.service
	KubeletServiceFile = "/lib/systemd/system/kubelet.service"
	// KubeletSystemdConfFile is config for the systemd kubelet.service
	KubeletSystemdConfFile = "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"
)

@@ -27,7 +27,7 @@ kind: MasterConfiguration
api:
  advertiseAddress: {{.AdvertiseAddress}}
  bindPort: {{.APIServerPort}}
controlPlaneEndpoint: localhost
controlPlaneEndpoint: {{.ControlPlaneIP}}
kubernetesVersion: {{.KubernetesVersion}}
certificatesDir: {{.CertDir}}
networking:
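For example, when GenerateKubeadmYAML fills in ControlPlaneIP with 1.1.1.1 (the address used in the expected test outputs further down in this diff), the rendered line becomes controlPlaneEndpoint: 1.1.1.1 instead of the previously hard-coded controlPlaneEndpoint: localhost.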
@@ -36,6 +36,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: {{if .CRISocket}}{{.CRISocket}}{{else}}/var/run/dockershim.sock{{end}}
  name: "{{.NodeName}}"
  kubeletExtraArgs:
    node-ip: {{.NodeIP}}
  taints: []
---
apiVersion: kubeadm.k8s.io/v1alpha3

@@ -50,7 +52,7 @@ kind: ClusterConfiguration
certificatesDir: {{.CertDir}}
clusterName: {{.ClusterName}}
apiServerCertSANs: ["127.0.0.1", "localhost", "{{.AdvertiseAddress}}"]
controlPlaneEndpoint: localhost:{{.APIServerPort}}
controlPlaneEndpoint: {{.ControlPlaneIP}}:{{.APIServerPort}}
etcd:
  local:
    dataDir: {{.EtcdDataDir}}

@@ -36,6 +36,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: {{if .CRISocket}}{{.CRISocket}}{{else}}/var/run/dockershim.sock{{end}}
  name: "{{.NodeName}}"
  kubeletExtraArgs:
    node-ip: {{.NodeIP}}
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta1

@@ -55,7 +57,7 @@ kind: ClusterConfiguration
{{end -}}{{end -}}
certificatesDir: {{.CertDir}}
clusterName: {{.ClusterName}}
controlPlaneEndpoint: localhost:{{.APIServerPort}}
controlPlaneEndpoint: {{.ControlPlaneIP}}:{{.APIServerPort}}
dns:
  type: CoreDNS
etcd:

@@ -36,6 +36,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: {{if .CRISocket}}{{.CRISocket}}{{else}}/var/run/dockershim.sock{{end}}
  name: "{{.NodeName}}"
  kubeletExtraArgs:
    node-ip: {{.NodeIP}}
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2

@@ -55,7 +57,7 @@ kind: ClusterConfiguration
{{end -}}{{end -}}
certificatesDir: {{.CertDir}}
clusterName: kubernetes
controlPlaneEndpoint: localhost:{{.APIServerPort}}
controlPlaneEndpoint: {{.ControlPlaneIP}}:{{.APIServerPort}}
controllerManager: {}
dns:
  type: CoreDNS
@@ -36,7 +36,7 @@
const remoteContainerRuntime = "remote"

// GenerateKubeadmYAML generates the kubeadm.yaml file
func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager) ([]byte, error) {
func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager, n config.Node) ([]byte, error) {
	k8s := mc.KubernetesConfig
	version, err := ParseKubernetesVersion(k8s.KubernetesVersion)
	if err != nil {

@@ -80,6 +80,8 @@ func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager) ([]byte, e
		ComponentOptions []componentOptions
		FeatureArgs      map[string]bool
		NoTaintMaster    bool
		NodeIP           string
		ControlPlaneIP   string
	}{
		CertDir:     vmpath.GuestKubernetesCertsDir,
		ServiceCIDR: constants.DefaultServiceCIDR,

@@ -96,6 +98,8 @@ func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager) ([]byte, e
		FeatureArgs:    kubeadmFeatureArgs,
		NoTaintMaster:  false, // That does not work with k8s 1.12+
		DNSDomain:      k8s.DNSDomain,
		NodeIP:         n.IP,
		ControlPlaneIP: cp.IP,
	}

	if k8s.ServiceCIDR != "" {
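A minimal, self-contained sketch of the substitution step (simplified; the real template and data struct are much larger): the new NodeIP and ControlPlaneIP fields are handed to Go's text/template together with the existing values and land in the kubeadm YAML shown in the templates above. The snippet below is illustrative, not the actual implementation.

package main

import (
	"os"
	"text/template"
)

// a trimmed-down stand-in for the kubeadm template fragments above
const snippet = `advertiseAddress: {{.AdvertiseAddress}}
controlPlaneEndpoint: {{.ControlPlaneIP}}:{{.APIServerPort}}
kubeletExtraArgs:
  node-ip: {{.NodeIP}}
`

func main() {
	data := struct {
		AdvertiseAddress string
		ControlPlaneIP   string
		APIServerPort    int
		NodeIP           string
	}{"1.1.1.1", "1.1.1.1", 8443, "1.1.1.1"}

	t := template.Must(template.New("kubeadm").Parse(snippet))
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}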
@@ -129,7 +129,7 @@ func TestGenerateKubeadmYAMLDNS(t *testing.T) {
			cfg.KubernetesConfig.KubernetesVersion = version + ".0"
			cfg.KubernetesConfig.ClusterName = "kubernetes"

			got, err := GenerateKubeadmYAML(cfg, runtime)
			got, err := GenerateKubeadmYAML(cfg, runtime, cfg.Nodes[0])
			if err != nil && !tc.shouldErr {
				t.Fatalf("got unexpected error generating config: %v", err)
			}

@@ -210,7 +210,7 @@ func TestGenerateKubeadmYAML(t *testing.T) {
			cfg.KubernetesConfig.KubernetesVersion = version + ".0"
			cfg.KubernetesConfig.ClusterName = "kubernetes"

			got, err := GenerateKubeadmYAML(cfg, runtime)
			got, err := GenerateKubeadmYAML(cfg, runtime, cfg.Nodes[0])
			if err != nil && !tc.shouldErr {
				t.Fatalf("got unexpected error generating config: %v", err)
			}
@@ -79,7 +79,7 @@ Wants=crio.service

[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.17.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
ExecStart=/var/lib/minikube/binaries/v1.17.3/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m

[Install]
`,

@@ -104,7 +104,7 @@ Wants=containerd.service

[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.17.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
ExecStart=/var/lib/minikube/binaries/v1.17.3/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m

[Install]
`,

@@ -136,7 +136,7 @@ Wants=containerd.service

[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.17.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
ExecStart=/var/lib/minikube/binaries/v1.17.3/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m

[Install]
`,

@@ -162,7 +162,7 @@ Wants=docker.socket

[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.17.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-infra-container-image=docker-proxy-image.io/google_containers/pause:3.1 --pod-manifest-path=/etc/kubernetes/manifests
ExecStart=/var/lib/minikube/binaries/v1.17.3/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-infra-container-image=docker-proxy-image.io/google_containers/pause:3.1 --pod-manifest-path=/etc/kubernetes/manifests

[Install]
`,
@@ -182,6 +182,7 @@ func apiServerHealthz(ip net.IP, port int) (state.State, error) {
	return state.Running, nil
}

// KubeletStatus checks the kubelet status
func KubeletStatus(cr command.Runner) (state.State, error) {
	glog.Infof("Checking kubelet status ...")
	rr, err := cr.RunCmd(exec.Command("sudo", "systemctl", "is-active", "kubelet"))
@@ -18,60 +18,14 @@ package bsutil

import (
	"fmt"
	"net"
	"os/exec"
	"strings"
	"time"

	"github.com/golang/glog"
	"github.com/pkg/errors"
	rbac "k8s.io/api/rbac/v1beta1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/minikube/pkg/minikube/command"
	"k8s.io/minikube/pkg/util/retry"
)

const (
	rbacName = "minikube-rbac"
)

// ElevateKubeSystemPrivileges gives the kube-system service account
// cluster admin privileges to work with RBAC.
func ElevateKubeSystemPrivileges(client kubernetes.Interface) error {
	start := time.Now()
	clusterRoleBinding := &rbac.ClusterRoleBinding{
		ObjectMeta: meta.ObjectMeta{
			Name: rbacName,
		},
		Subjects: []rbac.Subject{
			{
				Kind:      "ServiceAccount",
				Name:      "default",
				Namespace: "kube-system",
			},
		},
		RoleRef: rbac.RoleRef{
			Kind: "ClusterRole",
			Name: "cluster-admin",
		},
	}

	if _, err := client.RbacV1beta1().ClusterRoleBindings().Get(rbacName, meta.GetOptions{}); err == nil {
		glog.Infof("Role binding %s already exists. Skipping creation.", rbacName)
		return nil
	}
	if _, err := client.RbacV1beta1().ClusterRoleBindings().Create(clusterRoleBinding); err != nil {
		netErr, ok := err.(net.Error)
		if ok && netErr.Timeout() {
			return &retry.RetriableError{Err: errors.Wrap(err, "creating clusterrolebinding")}
		}
		return errors.Wrap(err, "creating clusterrolebinding")
	}
	glog.Infof("duration metric: took %s to wait for elevateKubeSystemPrivileges.", time.Since(start))
	return nil
}

// AdjustResourceLimits makes fine adjustments to pod resources that aren't possible via kubeadm config.
func AdjustResourceLimits(c command.Runner) error {
	rr, err := c.RunCmd(exec.Command("/bin/bash", "-c", "cat /proc/$(pgrep kube-apiserver)/oom_adj"))
@@ -4,7 +4,7 @@ noTaintMaster: true
api:
  advertiseAddress: 1.1.1.1
  bindPort: 12345
controlPlaneEndpoint: localhost
controlPlaneEndpoint: 1.1.1.1
kubernetesVersion: v1.11.0
certificatesDir: /var/lib/minikube/certs
networking:

@@ -4,7 +4,7 @@ noTaintMaster: true
api:
  advertiseAddress: 1.1.1.1
  bindPort: 8443
controlPlaneEndpoint: localhost
controlPlaneEndpoint: 1.1.1.1
kubernetesVersion: v1.11.0
certificatesDir: /var/lib/minikube/certs
networking:

@@ -4,7 +4,7 @@ noTaintMaster: true
api:
  advertiseAddress: 1.1.1.1
  bindPort: 8443
controlPlaneEndpoint: localhost
controlPlaneEndpoint: 1.1.1.1
kubernetesVersion: v1.11.0
certificatesDir: /var/lib/minikube/certs
networking:

@@ -4,7 +4,7 @@ noTaintMaster: true
api:
  advertiseAddress: 1.1.1.1
  bindPort: 8443
controlPlaneEndpoint: localhost
controlPlaneEndpoint: 1.1.1.1
kubernetesVersion: v1.11.0
certificatesDir: /var/lib/minikube/certs
networking:

@@ -4,7 +4,7 @@ noTaintMaster: true
api:
  advertiseAddress: 1.1.1.1
  bindPort: 8443
controlPlaneEndpoint: localhost
controlPlaneEndpoint: 1.1.1.1
kubernetesVersion: v1.11.0
certificatesDir: /var/lib/minikube/certs
networking:

@@ -4,7 +4,7 @@ noTaintMaster: true
api:
  advertiseAddress: 1.1.1.1
  bindPort: 8443
controlPlaneEndpoint: localhost
controlPlaneEndpoint: 1.1.1.1
kubernetesVersion: v1.11.0
certificatesDir: /var/lib/minikube/certs
networking:

@@ -4,7 +4,7 @@ noTaintMaster: true
api:
  advertiseAddress: 1.1.1.1
  bindPort: 8443
controlPlaneEndpoint: localhost
controlPlaneEndpoint: 1.1.1.1
kubernetesVersion: v1.11.0
certificatesDir: /var/lib/minikube/certs
networking:

@@ -4,7 +4,7 @@ noTaintMaster: true
api:
  advertiseAddress: 1.1.1.1
  bindPort: 8443
controlPlaneEndpoint: localhost
controlPlaneEndpoint: 1.1.1.1
kubernetesVersion: v1.11.0
certificatesDir: /var/lib/minikube/certs
networking:
@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1alpha3

@@ -22,7 +24,7 @@ apiServerExtraArgs:
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
controlPlaneEndpoint: localhost:12345
controlPlaneEndpoint: 1.1.1.1:12345
etcd:
  local:
    dataDir: /var/lib/minikube/etcd

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1alpha3

@@ -22,7 +24,7 @@ apiServerExtraArgs:
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1alpha3

@@ -22,7 +24,7 @@ apiServerExtraArgs:
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /var/run/crio/crio.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1alpha3

@@ -30,7 +32,7 @@ schedulerExtraArgs:
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /var/run/crio/crio.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1alpha3

@@ -22,7 +24,7 @@ apiServerExtraArgs:
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1alpha3

@@ -22,7 +24,7 @@ apiServerExtraArgs:
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1alpha3

@@ -22,7 +24,7 @@ apiServerExtraArgs:
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1alpha3

@@ -23,7 +25,7 @@ apiServerExtraArgs:
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1alpha3

@@ -27,7 +29,7 @@ schedulerExtraArgs:
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1alpha3

@@ -22,7 +24,7 @@ apiServerExtraArgs:
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
controlPlaneEndpoint: localhost:12345
controlPlaneEndpoint: 1.1.1.1:12345
etcd:
  local:
    dataDir: /var/lib/minikube/etcd

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1alpha3

@@ -22,7 +24,7 @@ apiServerExtraArgs:
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1alpha3

@@ -22,7 +24,7 @@ apiServerExtraArgs:
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /var/run/crio/crio.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1alpha3

@@ -30,7 +32,7 @@ schedulerExtraArgs:
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /var/run/crio/crio.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1alpha3

@@ -22,7 +24,7 @@ apiServerExtraArgs:
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1alpha3

@@ -22,7 +24,7 @@ apiServerExtraArgs:
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1alpha3

@@ -22,7 +24,7 @@ apiServerExtraArgs:
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1alpha3

@@ -23,7 +25,7 @@ apiServerExtraArgs:
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1alpha3

@@ -27,7 +29,7 @@ schedulerExtraArgs:
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta1

@@ -23,7 +25,7 @@ apiServer:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
controlPlaneEndpoint: localhost:12345
controlPlaneEndpoint: 1.1.1.1:12345
dns:
  type: CoreDNS
etcd:

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta1

@@ -23,7 +25,7 @@ apiServer:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
dns:
  type: CoreDNS
etcd:

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta1

@@ -23,7 +25,7 @@ apiServer:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
dns:
  type: CoreDNS
etcd:

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /var/run/crio/crio.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta1

@@ -33,7 +35,7 @@ scheduler:
    scheduler-name: "mini-scheduler"
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
dns:
  type: CoreDNS
etcd:

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /var/run/crio/crio.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta1

@@ -23,7 +25,7 @@ apiServer:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
dns:
  type: CoreDNS
etcd:

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta1

@@ -23,7 +25,7 @@ apiServer:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
dns:
  type: CoreDNS
etcd:

@@ -13,6 +13,8 @@ bootstrapTokens:
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: "mk"
  kubeletExtraArgs:
    node-ip: 1.1.1.1
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta1

@@ -23,7 +25,7 @@ apiServer:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
certificatesDir: /var/lib/minikube/certs
clusterName: kubernetes
controlPlaneEndpoint: localhost:8443
controlPlaneEndpoint: 1.1.1.1:8443
dns:
  type: CoreDNS
etcd: