Merge branch 'master' of github.com:kubernetes/minikube into ingress-none
commit
bef7a2a2b7
|
|
@ -103,7 +103,7 @@ jobs:
|
|||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
|
||||
- name: Generate HTML Report
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -115,8 +115,10 @@ jobs:
|
|||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
|
||||
echo 'STAT<<EOF' >> $GITHUB_ENV
|
||||
echo "${STAT}" >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
- name: Upload report
|
||||
uses: actions/upload-artifact@v1
|
||||
with:
|
||||
|
|
|
|||
|
|
@ -83,7 +83,7 @@ jobs:
|
|||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
|
||||
- name: Generate HTML Report
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -95,8 +95,10 @@ jobs:
|
|||
FailNum=$(echo $STAT | jq '.NumberOfFail' || true)
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests' || true)
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
|
||||
echo 'STAT<<EOF' >> $GITHUB_ENV
|
||||
echo "${STAT}" >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: kic_image_functional_test_docker_ubuntu
|
||||
|
|
|
|||
|
|
@ -145,7 +145,7 @@ jobs:
|
|||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
|
||||
- name: Generate HTML Report
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -157,8 +157,10 @@ jobs:
|
|||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
|
||||
echo 'STAT<<EOF' >> $GITHUB_ENV
|
||||
echo "${STAT}" >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: functional_docker_ubuntu
|
||||
|
|
@ -241,7 +243,7 @@ jobs:
|
|||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
|
||||
- name: Generate HTML Report
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -253,8 +255,10 @@ jobs:
|
|||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
|
||||
echo 'STAT<<EOF' >> $GITHUB_ENV
|
||||
echo "${STAT}" >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: functional_virtualbox_macos
|
||||
|
|
@ -373,7 +377,7 @@ jobs:
|
|||
echo "----"
|
||||
echo $T_ELAPSED
|
||||
echo "----"
|
||||
echo "::set-env name=TIME_ELAPSED::$T_ELAPSED"
|
||||
echo "TIME_ELAPSED=$T_ELAPSED" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
|
||||
- name: Generate HTML Report
|
||||
continue-on-error: true
|
||||
shell: powershell
|
||||
|
|
@ -385,8 +389,8 @@ jobs:
|
|||
$FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
$TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
$GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${Env:TIME_ELAPSED}"
|
||||
echo "::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}"
|
||||
echo "::set-env name=STAT::${STAT}"
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
|
||||
echo "STAT=${STAT}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
|
||||
echo ${GOPOGH_RESULT}
|
||||
$numFail=(echo $STAT | jq '.NumberOfFail')
|
||||
$failedTests=( echo $STAT | jq '.FailedTests')
|
||||
|
|
@ -510,7 +514,7 @@ jobs:
|
|||
echo "----"
|
||||
echo $T_ELAPSED
|
||||
echo "----"
|
||||
echo "::set-env name=TIME_ELAPSED::$T_ELAPSED"
|
||||
echo "TIME_ELAPSED=$T_ELAPSED" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
|
||||
- name: Generate HTML Report
|
||||
continue-on-error: true
|
||||
shell: powershell
|
||||
|
|
@ -522,8 +526,8 @@ jobs:
|
|||
$FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
$TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
$GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${Env:TIME_ELAPSED}"
|
||||
echo "::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}"
|
||||
echo "::set-env name=STAT::${STAT}"
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
|
||||
echo "STAT=${STAT}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
|
||||
echo ${GOPOGH_RESULT}
|
||||
$numFail=(echo $STAT | jq '.NumberOfFail')
|
||||
$failedTests=( echo $STAT | jq '.FailedTests')
|
||||
|
|
@ -611,7 +615,7 @@ jobs:
|
|||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
|
||||
- name: Generate HTML Report
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -623,8 +627,10 @@ jobs:
|
|||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
|
||||
echo 'STAT<<EOF' >> $GITHUB_ENV
|
||||
echo "${STAT}" >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: none_ubuntu18_04
|
||||
|
|
@ -700,13 +706,13 @@ jobs:
|
|||
chmod a+x e2e-*
|
||||
chmod a+x minikube-*
|
||||
START_TIME=$(date -u +%s)
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestAddons|TestCertOptions|TestSkaffold)" -test.timeout=20m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestAddons|TestCertOptions|TestSkaffold)" -test.timeout=30m -test.v -timeout-multiplier=1.2 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
|
||||
END_TIME=$(date -u +%s)
|
||||
TIME_ELAPSED=$(($END_TIME-$START_TIME))
|
||||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
|
||||
- name: Generate HTML Report
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -718,8 +724,10 @@ jobs:
|
|||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
|
||||
echo 'STAT<<EOF' >> $GITHUB_ENV
|
||||
echo "${STAT}" >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: addons_certs_docker_ubuntu
|
||||
|
|
@ -796,13 +804,13 @@ jobs:
|
|||
cp minikube-darwin-amd64 minikube
|
||||
chmod a+x minikube*
|
||||
START_TIME=$(date -u +%s)
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-darwin-amd64 -minikube-start-args=--vm-driver=virtualbox -test.run "(TestAddons|TestCertOptions|TestSkaffold)" -test.timeout=20m -test.v -timeout-multiplier=1.5 -binary=./minikube-darwin-amd64 2>&1 | tee ./report/testout.txt
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-darwin-amd64 -minikube-start-args=--vm-driver=virtualbox -test.run "(TestAddons|TestCertOptions|TestSkaffold)" -test.timeout=30m -test.v -timeout-multiplier=1.2 -binary=./minikube-darwin-amd64 2>&1 | tee ./report/testout.txt
|
||||
END_TIME=$(date -u +%s)
|
||||
TIME_ELAPSED=$(($END_TIME-$START_TIME))
|
||||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
|
||||
- name: Generate HTML Report
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -814,8 +822,10 @@ jobs:
|
|||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
|
||||
echo 'STAT<<EOF' >> $GITHUB_ENV
|
||||
echo "${STAT}" >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: addons_certs_virtualbox_macos
|
||||
|
|
@ -898,7 +908,7 @@ jobs:
|
|||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
|
||||
- name: Generate HTML Report
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -910,8 +920,10 @@ jobs:
|
|||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
|
||||
echo 'STAT<<EOF' >> $GITHUB_ENV
|
||||
echo "${STAT}" >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: multinode_docker_ubuntu
|
||||
|
|
@ -982,13 +994,13 @@ jobs:
|
|||
chmod a+x e2e-*
|
||||
chmod a+x minikube-*
|
||||
START_TIME=$(date -u +%s)
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-darwin-amd64 -minikube-start-args=--driver=virtualbox -test.run "TestMultiNode" -test.timeout=17m -test.v -timeout-multiplier=1.5 -binary=./minikube-darwin-amd64 2>&1 | tee ./report/testout.txt
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-darwin-amd64 -minikube-start-args=--driver=virtualbox -test.run "TestMultiNode" -test.timeout=30m -test.v -timeout-multiplier=1.2 -binary=./minikube-darwin-amd64 2>&1 | tee ./report/testout.txt
|
||||
END_TIME=$(date -u +%s)
|
||||
TIME_ELAPSED=$(($END_TIME-$START_TIME))
|
||||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
|
||||
- name: Generate HTML Report
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -1000,8 +1012,10 @@ jobs:
|
|||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
|
||||
echo 'STAT<<EOF' >> $GITHUB_ENV
|
||||
echo "${STAT}" >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: multinode_virtualbox_macos
|
||||
|
|
@ -1077,13 +1091,13 @@ jobs:
|
|||
chmod a+x e2e-*
|
||||
chmod a+x minikube-*
|
||||
START_TIME=$(date -u +%s)
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestPreload|TestDockerFlags)" -test.timeout=15m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestDockerFlags|TestPreload)" -test.timeout=30m -test.v -timeout-multiplier=1.2 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
|
||||
END_TIME=$(date -u +%s)
|
||||
TIME_ELAPSED=$(($END_TIME-$START_TIME))
|
||||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
|
||||
- name: Generate HTML Report
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -1095,8 +1109,10 @@ jobs:
|
|||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
|
||||
echo 'STAT<<EOF' >> $GITHUB_ENV
|
||||
echo "${STAT}" >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: preload_dockerflags_docker_ubuntu
|
||||
|
|
@ -1167,13 +1183,13 @@ jobs:
|
|||
chmod a+x e2e-*
|
||||
chmod a+x minikube-*
|
||||
START_TIME=$(date -u +%s)
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-darwin-amd64 -minikube-start-args=--vm-driver=virtualbox -test.run "(TestPause|TestPreload|TestDockerFlags)" -test.timeout=15m -test.v -timeout-multiplier=1.5 -binary=./minikube-darwin-amd64 2>&1 | tee ./report/testout.txt
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-darwin-amd64 -minikube-start-args=--vm-driver=virtualbox -test.run "(TestPause|TestPreload|TestDockerFlags)" -test.timeout=30m -test.v -timeout-multiplier=1.2 -binary=./minikube-darwin-amd64 2>&1 | tee ./report/testout.txt
|
||||
END_TIME=$(date -u +%s)
|
||||
TIME_ELAPSED=$(($END_TIME-$START_TIME))
|
||||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
|
||||
- name: Generate HTML Report
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -1185,8 +1201,10 @@ jobs:
|
|||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
|
||||
echo 'STAT<<EOF' >> $GITHUB_ENV
|
||||
echo "${STAT}" >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: pause_preload_dockerflags_virtualbox_macos
|
||||
|
|
|
|||
|
|
@ -143,7 +143,7 @@ jobs:
|
|||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
|
||||
- name: Generate HTML Report
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -155,8 +155,10 @@ jobs:
|
|||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
|
||||
echo 'STAT<<EOF' >> $GITHUB_ENV
|
||||
echo "${STAT}" >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: functional_docker_ubuntu
|
||||
|
|
@ -239,7 +241,7 @@ jobs:
|
|||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
|
||||
- name: Generate HTML Report
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -251,8 +253,10 @@ jobs:
|
|||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
|
||||
echo 'STAT<<EOF' >> $GITHUB_ENV
|
||||
echo "${STAT}" >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: functional_virtualbox_macos
|
||||
|
|
@ -371,7 +375,7 @@ jobs:
|
|||
echo "----"
|
||||
echo $T_ELAPSED
|
||||
echo "----"
|
||||
echo "::set-env name=TIME_ELAPSED::$T_ELAPSED"
|
||||
echo "TIME_ELAPSED=$T_ELAPSED" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
|
||||
- name: Generate HTML Report
|
||||
continue-on-error: true
|
||||
shell: powershell
|
||||
|
|
@ -383,8 +387,8 @@ jobs:
|
|||
$FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
$TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
$GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${Env:TIME_ELAPSED}"
|
||||
echo "::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}"
|
||||
echo "::set-env name=STAT::${STAT}"
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
|
||||
echo "STAT=${STAT}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
|
||||
echo ${GOPOGH_RESULT}
|
||||
$numFail=(echo $STAT | jq '.NumberOfFail')
|
||||
$failedTests=( echo $STAT | jq '.FailedTests')
|
||||
|
|
@ -508,7 +512,7 @@ jobs:
|
|||
echo "----"
|
||||
echo $T_ELAPSED
|
||||
echo "----"
|
||||
echo "::set-env name=TIME_ELAPSED::$T_ELAPSED"
|
||||
echo "TIME_ELAPSED=$T_ELAPSED" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
|
||||
- name: Generate HTML Report
|
||||
continue-on-error: true
|
||||
shell: powershell
|
||||
|
|
@ -520,8 +524,8 @@ jobs:
|
|||
$FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
$TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
$GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${Env:TIME_ELAPSED}"
|
||||
echo "::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}"
|
||||
echo "::set-env name=STAT::${STAT}"
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
|
||||
echo "STAT=${STAT}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
|
||||
echo ${GOPOGH_RESULT}
|
||||
$numFail=(echo $STAT | jq '.NumberOfFail')
|
||||
$failedTests=( echo $STAT | jq '.FailedTests')
|
||||
|
|
@ -609,7 +613,7 @@ jobs:
|
|||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
|
||||
- name: Generate HTML Report
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -621,8 +625,10 @@ jobs:
|
|||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
|
||||
echo 'STAT<<EOF' >> $GITHUB_ENV
|
||||
echo "${STAT}" >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: none_ubuntu18_04
|
||||
|
|
@ -698,13 +704,13 @@ jobs:
|
|||
chmod a+x e2e-*
|
||||
chmod a+x minikube-*
|
||||
START_TIME=$(date -u +%s)
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestAddons|TestCertOptions|TestSkaffold)" -test.timeout=20m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestAddons|TestCertOptions|TestSkaffold)" -test.timeout=30m -test.v -timeout-multiplier=1.2 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
|
||||
END_TIME=$(date -u +%s)
|
||||
TIME_ELAPSED=$(($END_TIME-$START_TIME))
|
||||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
|
||||
- name: Generate HTML Report
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -716,8 +722,10 @@ jobs:
|
|||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
|
||||
echo 'STAT<<EOF' >> $GITHUB_ENV
|
||||
echo "${STAT}" >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: addons_certs_docker_ubuntu
|
||||
|
|
@ -794,13 +802,13 @@ jobs:
|
|||
cp minikube-darwin-amd64 minikube
|
||||
chmod a+x minikube*
|
||||
START_TIME=$(date -u +%s)
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-darwin-amd64 -minikube-start-args=--vm-driver=virtualbox -test.run "(TestAddons|TestCertOptions|TestSkaffold)" -test.timeout=20m -test.v -timeout-multiplier=1.5 -binary=./minikube-darwin-amd64 2>&1 | tee ./report/testout.txt
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-darwin-amd64 -minikube-start-args=--vm-driver=virtualbox -test.run "(TestAddons|TestCertOptions|TestSkaffold)" -test.timeout=30m -test.v -timeout-multiplier=1.2 -binary=./minikube-darwin-amd64 2>&1 | tee ./report/testout.txt
|
||||
END_TIME=$(date -u +%s)
|
||||
TIME_ELAPSED=$(($END_TIME-$START_TIME))
|
||||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
|
||||
- name: Generate HTML Report
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -812,8 +820,10 @@ jobs:
|
|||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
|
||||
echo 'STAT<<EOF' >> $GITHUB_ENV
|
||||
echo "${STAT}" >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: addons_certs_virtualbox_macos
|
||||
|
|
@ -896,7 +906,7 @@ jobs:
|
|||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
|
||||
- name: Generate HTML Report
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -908,8 +918,10 @@ jobs:
|
|||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
|
||||
echo 'STAT<<EOF' >> $GITHUB_ENV
|
||||
echo "${STAT}" >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: multinode_docker_ubuntu
|
||||
|
|
@ -980,13 +992,13 @@ jobs:
|
|||
chmod a+x e2e-*
|
||||
chmod a+x minikube-*
|
||||
START_TIME=$(date -u +%s)
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-darwin-amd64 -minikube-start-args=--driver=virtualbox -test.run "TestMultiNode" -test.timeout=17m -test.v -timeout-multiplier=1.5 -binary=./minikube-darwin-amd64 2>&1 | tee ./report/testout.txt
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-darwin-amd64 -minikube-start-args=--driver=virtualbox -test.run "TestMultiNode" -test.timeout=30m -test.v -timeout-multiplier=1.2 -binary=./minikube-darwin-amd64 2>&1 | tee ./report/testout.txt
|
||||
END_TIME=$(date -u +%s)
|
||||
TIME_ELAPSED=$(($END_TIME-$START_TIME))
|
||||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
|
||||
- name: Generate HTML Report
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -998,8 +1010,10 @@ jobs:
|
|||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
|
||||
echo 'STAT<<EOF' >> $GITHUB_ENV
|
||||
echo "${STAT}" >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: multinode_virtualbox_macos
|
||||
|
|
@ -1075,13 +1089,13 @@ jobs:
|
|||
chmod a+x e2e-*
|
||||
chmod a+x minikube-*
|
||||
START_TIME=$(date -u +%s)
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestDockerFlags|TestPreload)" -test.timeout=10m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.run "(TestDockerFlags|TestPreload)" -test.timeout=30m -test.v -timeout-multiplier=1.2 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
|
||||
END_TIME=$(date -u +%s)
|
||||
TIME_ELAPSED=$(($END_TIME-$START_TIME))
|
||||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
|
||||
- name: Generate HTML Report
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -1093,8 +1107,10 @@ jobs:
|
|||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
|
||||
echo 'STAT<<EOF' >> $GITHUB_ENV
|
||||
echo "${STAT}" >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: preload_dockerflags_docker_ubuntu
|
||||
|
|
@ -1165,13 +1181,13 @@ jobs:
|
|||
chmod a+x e2e-*
|
||||
chmod a+x minikube-*
|
||||
START_TIME=$(date -u +%s)
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-darwin-amd64 -minikube-start-args=--vm-driver=virtualbox -test.run "(TestPause|TestPreload|TestDockerFlags)" -test.timeout=20m -test.v -timeout-multiplier=1.5 -binary=./minikube-darwin-amd64 2>&1 | tee ./report/testout.txt
|
||||
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-darwin-amd64 -minikube-start-args=--vm-driver=virtualbox -test.run "(TestPause|TestPreload|TestDockerFlags)" -test.timeout=30m -test.v -timeout-multiplier=1.2 -binary=./minikube-darwin-amd64 2>&1 | tee ./report/testout.txt
|
||||
END_TIME=$(date -u +%s)
|
||||
TIME_ELAPSED=$(($END_TIME-$START_TIME))
|
||||
min=$((${TIME_ELAPSED}/60))
|
||||
sec=$((${TIME_ELAPSED}%60))
|
||||
TIME_ELAPSED="${min} min $sec seconds "
|
||||
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
|
||||
echo "TIME_ELAPSED=${TIME_ELAPSED}" >> $GITHUB_ENV
|
||||
- name: Generate HTML Report
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -1183,8 +1199,10 @@ jobs:
|
|||
FailNum=$(echo $STAT | jq '.NumberOfFail')
|
||||
TestsNum=$(echo $STAT | jq '.NumberOfTests')
|
||||
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
|
||||
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
|
||||
echo ::set-env name=STAT::${STAT}
|
||||
echo "GOPOGH_RESULT=${GOPOGH_RESULT}" >> $GITHUB_ENV
|
||||
echo 'STAT<<EOF' >> $GITHUB_ENV
|
||||
echo "${STAT}" >> $GITHUB_ENV
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: pause_preload_dockerflags_virtualbox_macos
|
||||
|
|
|
|||
43
.travis.yml
43
.travis.yml
|
|
@ -1,43 +0,0 @@
|
|||
---
|
||||
# linted with yamllint
|
||||
os: linux
|
||||
language: go
|
||||
go:
|
||||
- 1.15.2
|
||||
env:
|
||||
global:
|
||||
- GOPROXY=https://proxy.golang.org
|
||||
matrix:
|
||||
include:
|
||||
- language: go
|
||||
name: Code Lint
|
||||
go: 1.15.2
|
||||
env:
|
||||
- TESTSUITE=lintall
|
||||
before_install:
|
||||
- sudo apt-get install -y libvirt-dev
|
||||
script: make test
|
||||
|
||||
- language: go
|
||||
name: Unit Test
|
||||
go: 1.15.2
|
||||
env:
|
||||
- TESTSUITE=unittest
|
||||
before_install:
|
||||
- sudo apt-get install -y libvirt-dev
|
||||
script: make test
|
||||
|
||||
- language: go
|
||||
name: Build
|
||||
go: 1.15.2
|
||||
script: make
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
notifications:
|
||||
webhooks:
|
||||
urls:
|
||||
- https://www.travisbuddy.com?only=failed,errored
|
||||
on_success: never # don't comment on successful builds.
|
||||
on_failure: always
|
||||
on_cancel: always
|
||||
on_error: always
|
||||
28
Makefile
28
Makefile
|
|
@ -23,7 +23,7 @@ KUBERNETES_VERSION ?= $(shell egrep "DefaultKubernetesVersion =" pkg/minikube/co
|
|||
KIC_VERSION ?= $(shell egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f2)
|
||||
|
||||
# Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions
|
||||
ISO_VERSION ?= v1.14.0
|
||||
ISO_VERSION ?= v1.15.0-snapshot1
|
||||
# Dashes are valid in semver, but not Linux packaging. Use ~ to delimit alpha/beta
|
||||
DEB_VERSION ?= $(subst -,~,$(RAW_VERSION))
|
||||
RPM_VERSION ?= $(DEB_VERSION)
|
||||
|
|
@ -32,7 +32,7 @@ RPM_VERSION ?= $(DEB_VERSION)
|
|||
GO_VERSION ?= 1.15.2
|
||||
|
||||
INSTALL_SIZE ?= $(shell du out/minikube-windows-amd64.exe | cut -f1)
|
||||
BUILDROOT_BRANCH ?= 2020.02.6
|
||||
BUILDROOT_BRANCH ?= 2020.02.7
|
||||
REGISTRY?=gcr.io/k8s-minikube
|
||||
REGISTRY_GH?=docker.pkg.github.com/kubernetes/minikube
|
||||
|
||||
|
|
@ -58,7 +58,7 @@ MINIKUBE_BUCKET ?= minikube/releases
|
|||
MINIKUBE_UPLOAD_LOCATION := gs://${MINIKUBE_BUCKET}
|
||||
MINIKUBE_RELEASES_URL=https://github.com/kubernetes/minikube/releases/download
|
||||
|
||||
KERNEL_VERSION ?= 4.19.114
|
||||
KERNEL_VERSION ?= 4.19.150
|
||||
# latest from https://github.com/golangci/golangci-lint/releases
|
||||
GOLINT_VERSION ?= v1.30.0
|
||||
# Limit number of default jobs, to avoid the CI builds running out of memory
|
||||
|
|
@ -581,13 +581,12 @@ storage-provisioner-image-%: out/storage-provisioner-%
|
|||
docker build -t $(REGISTRY)/storage-provisioner-$*:$(STORAGE_PROVISIONER_TAG) -f deploy/storage-provisioner/Dockerfile --build-arg arch=$* .
|
||||
|
||||
.PHONY: kic-base-image
|
||||
kic-base-image: ## builds the base image used for kic.
|
||||
docker rmi -f $(KIC_BASE_IMAGE_GCR)-snapshot || true
|
||||
docker build -f ./deploy/kicbase/Dockerfile -t local/kicbase:$(KIC_VERSION)-snapshot --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) --cache-from $(KIC_BASE_IMAGE_GCR) ./deploy/kicbase
|
||||
docker tag local/kicbase:$(KIC_VERSION)-snapshot $(KIC_BASE_IMAGE_GCR)-snapshot
|
||||
docker tag local/kicbase:$(KIC_VERSION)-snapshot $(KIC_BASE_IMAGE_GCR)
|
||||
docker tag local/kicbase:$(KIC_VERSION)-snapshot $(KIC_BASE_IMAGE_HUB)
|
||||
docker tag local/kicbase:$(KIC_VERSION)-snapshot $(KIC_BASE_IMAGE_GH)
|
||||
kic-base-image: ## builds the kic base image and tags local/kicbase:latest and local/kicbase:$(KIC_VERSION)-$(COMMIT_SHORT)
|
||||
docker rmi -f local/kicbase:latest || true
|
||||
docker rmi -f local/kicbase:$(KIC_VERSION) || true
|
||||
docker build -f ./deploy/kicbase/Dockerfile -t local/kicbase:$(KIC_VERSION) --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) --cache-from $(KIC_BASE_IMAGE_GCR) ./deploy/kicbase
|
||||
docker tag local/kicbase:$(KIC_VERSION) local/kicbase:latest
|
||||
docker tag local/kicbase:$(KIC_VERSION) local/kicbase:$(KIC_VERSION)-$(COMMIT_SHORT)
|
||||
|
||||
.PHONY: upload-preloaded-images-tar
|
||||
upload-preloaded-images-tar: out/minikube # Upload the preloaded images for oldest supported, newest supported, and default kubernetes versions to GCS.
|
||||
|
|
@ -612,7 +611,7 @@ push-storage-provisioner-manifest: $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~st
|
|||
docker manifest push $(STORAGE_PROVISIONER_MANIFEST)
|
||||
|
||||
.PHONY: push-docker
|
||||
push-docker: # Push docker image base on to IMAGE variable
|
||||
push-docker: # Push docker image base on to IMAGE variable (used internally by other targets)
|
||||
@docker pull $(IMAGE) && echo "Image already exist in registry" && exit 1 || echo "Image doesn't exist in registry"
|
||||
ifndef AUTOPUSH
|
||||
$(call user_confirm, 'Are you sure you want to push $(IMAGE) ?')
|
||||
|
|
@ -622,25 +621,28 @@ endif
|
|||
.PHONY: push-kic-base-image-gcr
|
||||
push-kic-base-image-gcr: kic-base-image ## Push kic-base to gcr
|
||||
docker login gcr.io/k8s-minikube
|
||||
docker tag local/kicbase:latest $(KIC_BASE_IMAGE_GCR)
|
||||
$(MAKE) push-docker IMAGE=$(KIC_BASE_IMAGE_GCR)
|
||||
|
||||
.PHONY: push-kic-base-image-gh
|
||||
push-kic-base-image-gh: kic-base-image ## Push kic-base to github
|
||||
docker login docker.pkg.github.com
|
||||
docker tag local/kicbase:latest $(KIC_BASE_IMAGE_GH)
|
||||
$(MAKE) push-docker IMAGE=$(KIC_BASE_IMAGE_GH)
|
||||
|
||||
.PHONY: push-kic-base-image-hub
|
||||
push-kic-base-image-hub: kic-base-image ## Push kic-base to docker hub
|
||||
docker login
|
||||
docker tag local/kicbase:latest $(KIC_BASE_IMAGE_HUB)
|
||||
$(MAKE) push-docker IMAGE=$(KIC_BASE_IMAGE_HUB)
|
||||
|
||||
.PHONY: push-kic-base-image
|
||||
push-kic-base-image: ## Push kic-base to all registries
|
||||
push-kic-base-image: ## Push local/kicbase:latest to all remote registries
|
||||
ifndef AUTOPUSH
|
||||
$(call user_confirm, 'Are you sure you want to push: $(KIC_BASE_IMAGE_GH) & $(KIC_BASE_IMAGE_GCR) & $(KIC_BASE_IMAGE_HUB) ?')
|
||||
$(MAKE) push-kic-base-image AUTOPUSH=true
|
||||
else
|
||||
$(MAKE) push-kic-base-image-gh push-kic-base-image-gcr push-kic-base-image-hub
|
||||
$(MAKE) push-kic-base-image-gcr push-kic-base-image-hub push-kic-base-image-gh
|
||||
endif
|
||||
|
||||
.PHONY: out/gvisor-addon
|
||||
|
|
|
|||
1
OWNERS
1
OWNERS
|
|
@ -10,6 +10,7 @@ reviewers:
|
|||
- blueelvis
|
||||
- priyawadhwa
|
||||
- prasadkatti
|
||||
- ilya-zuyev
|
||||
approvers:
|
||||
- tstromberg
|
||||
- afbjorklund
|
||||
|
|
|
|||
|
|
@ -1,14 +1,11 @@
|
|||
# minikube
|
||||
|
||||
[![BuildStatus Widget]][BuildStatus Result]
|
||||
[](https://github.com/kubernetes/minikube/actions)
|
||||
[![GoReport Widget]][GoReport Status]
|
||||
[](https://github.com/kubernetes/minikube/releases/latest)
|
||||
[](https://github.com/kubernetes/minikube/releases/latest)
|
||||
|
||||
|
||||
[BuildStatus Result]: https://travis-ci.org/kubernetes/minikube
|
||||
[BuildStatus Widget]: https://travis-ci.org/kubernetes/minikube.svg?branch=master
|
||||
|
||||
[GoReport Status]: https://goreportcard.com/report/github.com/kubernetes/minikube
|
||||
[GoReport Widget]: https://goreportcard.com/badge/github.com/kubernetes/minikube
|
||||
|
||||
|
|
|
|||
|
|
@ -23,10 +23,12 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/spf13/pflag"
|
||||
"k8s.io/minikube/pkg/generate"
|
||||
)
|
||||
|
||||
func TestGenerateDocs(t *testing.T) {
|
||||
pflag.BoolP("help", "h", false, "") // avoid 'Docs are not updated. Please run `make generate-docs` to update commands documentation' error
|
||||
dir := "../../../site/content/en/docs/commands/"
|
||||
|
||||
for _, sc := range RootCmd.Commands() {
|
||||
|
|
|
|||
|
|
@ -50,6 +50,7 @@ var pauseCmd = &cobra.Command{
|
|||
}
|
||||
|
||||
func runPause(cmd *cobra.Command, args []string) {
|
||||
out.SetJSON(outputFormat == "json")
|
||||
co := mustload.Running(ClusterFlagValue())
|
||||
register.SetEventLogPath(localpath.EventLog(ClusterFlagValue()))
|
||||
register.Reg.SetStep(register.Pausing)
|
||||
|
|
@ -105,4 +106,5 @@ func runPause(cmd *cobra.Command, args []string) {
|
|||
func init() {
|
||||
pauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", constants.DefaultNamespaces, "namespaces to pause")
|
||||
pauseCmd.Flags().BoolVarP(&allNamespaces, "all-namespaces", "A", false, "If set, pause all namespaces")
|
||||
pauseCmd.Flags().StringVarP(&outputFormat, "output", "o", "text", "Format to print stdout in. Options include: [text,json]")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -39,12 +39,16 @@ import (
|
|||
"k8s.io/minikube/pkg/minikube/shell"
|
||||
)
|
||||
|
||||
var podmanEnvTmpl = fmt.Sprintf("{{ .Prefix }}%s{{ .Delimiter }}{{ .VarlinkBridge }}{{ .Suffix }}{{ .Prefix }}%s{{ .Delimiter }}{{ .MinikubePodmanProfile }}{{ .Suffix }}{{ .UsageHint }}", constants.PodmanVarlinkBridgeEnv, constants.MinikubeActivePodmanEnv)
|
||||
var podmanEnv1Tmpl = fmt.Sprintf("{{ .Prefix }}%s{{ .Delimiter }}{{ .VarlinkBridge }}{{ .Suffix }}{{ .Prefix }}%s{{ .Delimiter }}{{ .MinikubePodmanProfile }}{{ .Suffix }}{{ .UsageHint }}", constants.PodmanVarlinkBridgeEnv, constants.MinikubeActivePodmanEnv)
|
||||
|
||||
var podmanEnv2Tmpl = fmt.Sprintf("{{ .Prefix }}%s{{ .Delimiter }}{{ .ContainerHost }}{{ .Suffix }}{{ if .ContainerSSHKey }}{{ .Prefix }}%s{{ .Delimiter }}{{ .ContainerSSHKey}}{{ .Suffix }}{{ end }}{{ .Prefix }}%s{{ .Delimiter }}{{ .MinikubePodmanProfile }}{{ .Suffix }}{{ .UsageHint }}", constants.PodmanContainerHostEnv, constants.PodmanContainerSSHKeyEnv, constants.MinikubeActivePodmanEnv)
|
||||
|
||||
// PodmanShellConfig represents the shell config for Podman
|
||||
type PodmanShellConfig struct {
|
||||
shell.Config
|
||||
VarlinkBridge string
|
||||
ContainerHost string
|
||||
ContainerSSHKey string
|
||||
MinikubePodmanProfile string
|
||||
}
|
||||
|
||||
|
|
@ -59,17 +63,24 @@ func podmanShellCfgSet(ec PodmanEnvConfig, envMap map[string]string) *PodmanShel
|
|||
Config: *shell.CfgSet(ec.EnvConfig, usgPlz, usgCmd),
|
||||
}
|
||||
s.VarlinkBridge = envMap[constants.PodmanVarlinkBridgeEnv]
|
||||
s.ContainerHost = envMap[constants.PodmanContainerHostEnv]
|
||||
s.ContainerSSHKey = envMap[constants.PodmanContainerSSHKeyEnv]
|
||||
s.MinikubePodmanProfile = envMap[constants.MinikubeActivePodmanEnv]
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// isPodmanAvailable checks if Podman is available
|
||||
func isPodmanAvailable(r command.Runner) bool {
|
||||
// isVarlinkAvailable checks if varlink command is available
|
||||
func isVarlinkAvailable(r command.Runner) bool {
|
||||
if _, err := r.RunCmd(exec.Command("which", "varlink")); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// isPodmanAvailable checks if podman command is available
|
||||
func isPodmanAvailable(r command.Runner) bool {
|
||||
if _, err := r.RunCmd(exec.Command("which", "podman")); err != nil {
|
||||
return false
|
||||
}
|
||||
|
|
@ -130,11 +141,25 @@ var podmanEnvCmd = &cobra.Command{
|
|||
exit.Message(reason.Usage, `The podman-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/`)
|
||||
}
|
||||
|
||||
if ok := isPodmanAvailable(co.CP.Runner); !ok {
|
||||
r := co.CP.Runner
|
||||
if ok := isPodmanAvailable(r); !ok {
|
||||
exit.Message(reason.EnvPodmanUnavailable, `The podman service within '{{.cluster}}' is not active`, out.V{"cluster": cname})
|
||||
}
|
||||
|
||||
client, err := createExternalSSHClient(co.CP.Host.Driver)
|
||||
varlink := isVarlinkAvailable(r)
|
||||
|
||||
d := co.CP.Host.Driver
|
||||
client, err := createExternalSSHClient(d)
|
||||
if err != nil {
|
||||
exit.Error(reason.IfSSHClient, "Error getting ssh client", err)
|
||||
}
|
||||
|
||||
hostname, err := d.GetSSHHostname()
|
||||
if err != nil {
|
||||
exit.Error(reason.IfSSHClient, "Error getting ssh client", err)
|
||||
}
|
||||
|
||||
port, err := d.GetSSHPort()
|
||||
if err != nil {
|
||||
exit.Error(reason.IfSSHClient, "Error getting ssh client", err)
|
||||
}
|
||||
|
|
@ -143,7 +168,12 @@ var podmanEnvCmd = &cobra.Command{
|
|||
EnvConfig: sh,
|
||||
profile: cname,
|
||||
driver: driverName,
|
||||
varlink: varlink,
|
||||
client: client,
|
||||
username: d.GetSSHUsername(),
|
||||
hostname: hostname,
|
||||
port: port,
|
||||
keypath: d.GetSSHKeyPath(),
|
||||
}
|
||||
|
||||
if ec.Shell == "" {
|
||||
|
|
@ -162,23 +192,31 @@ var podmanEnvCmd = &cobra.Command{
|
|||
// PodmanEnvConfig encapsulates all external inputs into shell generation for Podman
|
||||
type PodmanEnvConfig struct {
|
||||
shell.EnvConfig
|
||||
profile string
|
||||
driver string
|
||||
client *ssh.ExternalClient
|
||||
profile string
|
||||
driver string
|
||||
varlink bool
|
||||
client *ssh.ExternalClient
|
||||
username string
|
||||
hostname string
|
||||
port int
|
||||
keypath string
|
||||
}
|
||||
|
||||
// podmanSetScript writes out a shell-compatible 'podman-env' script
|
||||
func podmanSetScript(ec PodmanEnvConfig, w io.Writer) error {
|
||||
var podmanEnvTmpl string
|
||||
if ec.varlink {
|
||||
podmanEnvTmpl = podmanEnv1Tmpl
|
||||
} else {
|
||||
podmanEnvTmpl = podmanEnv2Tmpl
|
||||
}
|
||||
envVars := podmanEnvVars(ec)
|
||||
return shell.SetScript(ec.EnvConfig, w, podmanEnvTmpl, podmanShellCfgSet(ec, envVars))
|
||||
}
|
||||
|
||||
// podmanUnsetScript writes out a shell-compatible 'podman-env unset' script
|
||||
func podmanUnsetScript(ec PodmanEnvConfig, w io.Writer) error {
|
||||
vars := []string{
|
||||
constants.PodmanVarlinkBridgeEnv,
|
||||
constants.MinikubeActivePodmanEnv,
|
||||
}
|
||||
vars := podmanEnvNames(ec)
|
||||
return shell.UnsetScript(ec.EnvConfig, w, vars)
|
||||
}
|
||||
|
||||
|
|
@ -190,15 +228,73 @@ func podmanBridge(client *ssh.ExternalClient) string {
|
|||
return strings.Join(command, " ")
|
||||
}
|
||||
|
||||
// podmanURL returns the url to use in a var for accessing the podman socket over ssh
|
||||
func podmanURL(username string, hostname string, port int) string {
|
||||
path := "/run/podman/podman.sock"
|
||||
return fmt.Sprintf("ssh://%s@%s:%d%s", username, hostname, port, path)
|
||||
}
|
||||
|
||||
// podmanEnvVars gets the necessary podman env variables to allow the use of minikube's podman service
|
||||
func podmanEnvVars(ec PodmanEnvConfig) map[string]string {
|
||||
env := map[string]string{
|
||||
constants.PodmanVarlinkBridgeEnv: podmanBridge(ec.client),
|
||||
// podman v1
|
||||
env1 := map[string]string{
|
||||
constants.PodmanVarlinkBridgeEnv: podmanBridge(ec.client),
|
||||
}
|
||||
// podman v2
|
||||
env2 := map[string]string{
|
||||
constants.PodmanContainerHostEnv: podmanURL(ec.username, ec.hostname, ec.port),
|
||||
constants.PodmanContainerSSHKeyEnv: ec.keypath,
|
||||
}
|
||||
//common
|
||||
env0 := map[string]string{
|
||||
constants.MinikubeActivePodmanEnv: ec.profile,
|
||||
}
|
||||
|
||||
var env map[string]string
|
||||
if ec.varlink {
|
||||
env = env1
|
||||
} else {
|
||||
env = env2
|
||||
}
|
||||
for k, v := range env0 {
|
||||
env[k] = v
|
||||
}
|
||||
return env
|
||||
}
|
||||
|
||||
// podmanEnvNames gets the necessary podman env variables to reset after using minikube's podman service
|
||||
func podmanEnvNames(ec PodmanEnvConfig) []string {
|
||||
// podman v1
|
||||
vars1 := []string{
|
||||
constants.PodmanVarlinkBridgeEnv,
|
||||
}
|
||||
// podman v2
|
||||
vars2 := []string{
|
||||
constants.PodmanContainerHostEnv,
|
||||
constants.PodmanContainerSSHKeyEnv,
|
||||
}
|
||||
// common
|
||||
vars0 := []string{
|
||||
constants.MinikubeActivePodmanEnv,
|
||||
}
|
||||
|
||||
var vars []string
|
||||
if ec.client != nil || ec.hostname != "" {
|
||||
// getting ec.varlink needs a running machine
|
||||
if ec.varlink {
|
||||
vars = vars1
|
||||
} else {
|
||||
vars = vars2
|
||||
}
|
||||
} else {
|
||||
// just unset *all* of the variables instead
|
||||
vars = vars1
|
||||
vars = append(vars, vars2...)
|
||||
}
|
||||
vars = append(vars, vars0...)
|
||||
return vars
|
||||
}
|
||||
|
||||
func init() {
|
||||
podmanEnvCmd.Flags().StringVar(&shell.ForceShell, "shell", "", "Force environment to be configured for a specified shell: [fish, cmd, powershell, tcsh, bash, zsh], default is auto-detect")
|
||||
podmanEnvCmd.Flags().BoolVarP(&podmanUnset, "unset", "u", false, "Unset variables instead of setting them")
|
||||
|
|
|
|||
|
|
@ -41,7 +41,7 @@ func TestGeneratePodmanScripts(t *testing.T) {
|
|||
}{
|
||||
{
|
||||
"bash",
|
||||
PodmanEnvConfig{profile: "bash", driver: "kvm2", client: newFakeClient()},
|
||||
PodmanEnvConfig{profile: "bash", driver: "kvm2", varlink: true, client: newFakeClient()},
|
||||
nil,
|
||||
`export PODMAN_VARLINK_BRIDGE="/usr/bin/ssh root@host -- sudo varlink -A \'podman varlink \\\$VARLINK_ADDRESS\' bridge"
|
||||
export MINIKUBE_ACTIVE_PODMAN="bash"
|
||||
|
|
@ -50,6 +50,19 @@ export MINIKUBE_ACTIVE_PODMAN="bash"
|
|||
# eval $(minikube -p bash podman-env)
|
||||
`,
|
||||
`unset PODMAN_VARLINK_BRIDGE MINIKUBE_ACTIVE_PODMAN
|
||||
`,
|
||||
},
|
||||
{
|
||||
"bash",
|
||||
PodmanEnvConfig{profile: "bash", driver: "kvm2", client: newFakeClient(), username: "root", hostname: "host", port: 22},
|
||||
nil,
|
||||
`export CONTAINER_HOST="ssh://root@host:22/run/podman/podman.sock"
|
||||
export MINIKUBE_ACTIVE_PODMAN="bash"
|
||||
|
||||
# To point your shell to minikube's podman service, run:
|
||||
# eval $(minikube -p bash podman-env)
|
||||
`,
|
||||
`unset CONTAINER_HOST CONTAINER_SSHKEY MINIKUBE_ACTIVE_PODMAN
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ limitations under the License.
|
|||
package cmd
|
||||
|
||||
import (
|
||||
goflag "flag"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
|
@ -71,7 +71,23 @@ func Execute() {
|
|||
_, callingCmd := filepath.Split(os.Args[0])
|
||||
|
||||
if callingCmd == "kubectl" {
|
||||
os.Args = append([]string{RootCmd.Use, callingCmd}, os.Args[1:]...)
|
||||
// If the user is using the minikube binary as kubectl, allow them to specify the kubectl context without also specifying minikube profile
|
||||
profile := ""
|
||||
for i, a := range os.Args {
|
||||
if a == "--context" {
|
||||
profile = fmt.Sprintf("--profile=%s", os.Args[i+1])
|
||||
break
|
||||
} else if strings.HasPrefix(a, "--context=") {
|
||||
context := strings.Split(a, "=")[1]
|
||||
profile = fmt.Sprintf("--profile=%s", context)
|
||||
break
|
||||
}
|
||||
}
|
||||
if profile != "" {
|
||||
os.Args = append([]string{RootCmd.Use, callingCmd, profile, "--"}, os.Args[1:]...)
|
||||
} else {
|
||||
os.Args = append([]string{RootCmd.Use, callingCmd, "--"}, os.Args[1:]...)
|
||||
}
|
||||
}
|
||||
for _, c := range RootCmd.Commands() {
|
||||
c.Short = translate.T(c.Short)
|
||||
|
|
@ -140,6 +156,18 @@ func usageTemplate() string {
|
|||
}
|
||||
|
||||
func init() {
|
||||
klog.InitFlags(nil)
|
||||
// preset logtostderr and alsologtostderr only for test runs, for normal runs consider flags in main()
|
||||
if strings.HasPrefix(filepath.Base(os.Args[0]), "e2e-") || strings.HasSuffix(os.Args[0], "test") {
|
||||
if err := flag.Set("logtostderr", "false"); err != nil {
|
||||
klog.Warningf("Unable to set default flag value for logtostderr: %v", err)
|
||||
}
|
||||
if err := flag.Set("alsologtostderr", "false"); err != nil {
|
||||
klog.Warningf("Unable to set default flag value for alsologtostderr: %v", err)
|
||||
}
|
||||
}
|
||||
pflag.CommandLine.AddGoFlagSet(flag.CommandLine) // avoid `generate-docs_test.go` complaining about "Docs are not updated"
|
||||
|
||||
RootCmd.PersistentFlags().StringP(config.ProfileName, "p", constants.DefaultClusterName, `The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently.`)
|
||||
RootCmd.PersistentFlags().StringP(configCmd.Bootstrapper, "b", "kubeadm", "The name of the cluster bootstrapper that will set up the Kubernetes cluster.")
|
||||
|
||||
|
|
@ -207,15 +235,6 @@ func init() {
|
|||
RootCmd.AddCommand(completionCmd)
|
||||
templates.ActsAsRootCommand(RootCmd, []string{"options"}, groups...)
|
||||
|
||||
klog.InitFlags(nil)
|
||||
if err := goflag.Set("logtostderr", "false"); err != nil {
|
||||
klog.Warningf("Unable to set default flag value for logtostderr: %v", err)
|
||||
}
|
||||
if err := goflag.Set("alsologtostderr", "false"); err != nil {
|
||||
klog.Warningf("Unable to set default flag value for alsologtostderr: %v", err)
|
||||
}
|
||||
|
||||
pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
|
||||
if err := viper.BindPFlags(RootCmd.PersistentFlags()); err != nil {
|
||||
exit.Error(reason.InternalBindFlags, "Unable to bind flags", err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -128,7 +128,7 @@ func platform() string {
|
|||
func runStart(cmd *cobra.Command, args []string) {
|
||||
register.SetEventLogPath(localpath.EventLog(ClusterFlagValue()))
|
||||
|
||||
out.SetJSON(viper.GetString(startOutput) == "json")
|
||||
out.SetJSON(outputFormat == "json")
|
||||
displayVersion(version.GetVersion())
|
||||
|
||||
// No need to do the update check if no one is going to see it
|
||||
|
|
@ -436,7 +436,7 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st
|
|||
if kcs.KeepContext {
|
||||
out.T(style.Kubectl, "To connect to this cluster, use: --context={{.name}}", out.V{"name": kcs.ClusterName})
|
||||
} else {
|
||||
out.T(style.Ready, `Done! kubectl is now configured to use "{{.name}}" by default`, out.V{"name": machineName})
|
||||
out.T(style.Ready, `Done! kubectl is now configured to use "{{.name}}" cluster and "{{.ns}}" namespace by default`, out.V{"name": machineName, "ns": kcs.Namespace})
|
||||
}
|
||||
}()
|
||||
|
||||
|
|
@ -464,7 +464,7 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st
|
|||
out.Ln("")
|
||||
out.WarningT("{{.path}} is version {{.client_version}}, which may have incompatibilites with Kubernetes {{.cluster_version}}.",
|
||||
out.V{"path": path, "client_version": client, "cluster_version": cluster})
|
||||
out.T(style.Tip, "Want kubectl {{.version}}? Try 'minikube kubectl -- get pods -A'", out.V{"version": k8sVersion})
|
||||
out.Infof("Want kubectl {{.version}}? Try 'minikube kubectl -- get pods -A'", out.V{"version": k8sVersion})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1074,7 +1074,7 @@ func validateFlags(cmd *cobra.Command, drvName string) {
|
|||
}
|
||||
}
|
||||
|
||||
if s := viper.GetString(startOutput); s != "text" && s != "json" {
|
||||
if outputFormat != "text" && outputFormat != "json" {
|
||||
exit.Message(reason.Usage, "Sorry, please set the --output flag to one of the following valid options: [text,json]")
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -106,8 +106,12 @@ const (
|
|||
deleteOnFailure = "delete-on-failure"
|
||||
forceSystemd = "force-systemd"
|
||||
kicBaseImage = "base-image"
|
||||
startOutput = "output"
|
||||
ports = "ports"
|
||||
startNamespace = "namespace"
|
||||
)
|
||||
|
||||
var (
|
||||
outputFormat string
|
||||
)
|
||||
|
||||
// initMinikubeFlags includes commandline flags for minikube.
|
||||
|
|
@ -147,12 +151,13 @@ func initMinikubeFlags() {
|
|||
startCmd.Flags().Bool(preload, true, "If set, download tarball of preloaded images if available to improve start time. Defaults to true.")
|
||||
startCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
|
||||
startCmd.Flags().Bool(forceSystemd, false, "If set, force the container runtime to use sytemd as cgroup manager. Currently available for docker and crio. Defaults to false.")
|
||||
startCmd.Flags().StringP(startOutput, "o", "text", "Format to print stdout in. Options include: [text,json]")
|
||||
startCmd.Flags().StringVarP(&outputFormat, "output", "o", "text", "Format to print stdout in. Options include: [text,json]")
|
||||
}
|
||||
|
||||
// initKubernetesFlags inits the commandline flags for Kubernetes related options
|
||||
func initKubernetesFlags() {
|
||||
startCmd.Flags().String(kubernetesVersion, "", fmt.Sprintf("The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for %s, 'latest' for %s). Defaults to 'stable'.", constants.DefaultKubernetesVersion, constants.NewestKubernetesVersion))
|
||||
startCmd.Flags().String(startNamespace, "default", "The named space to activate after start")
|
||||
startCmd.Flags().Var(&config.ExtraOptions, "extra-config",
|
||||
`A set of key=value pairs that describe configuration that may be passed to different components.
|
||||
The key should be '.' separated, and the first part before the dot is the component to apply the configuration to.
|
||||
|
|
@ -162,7 +167,7 @@ func initKubernetesFlags() {
|
|||
startCmd.Flags().String(dnsDomain, constants.ClusterDNSDomain, "The cluster dns domain name used in the Kubernetes cluster")
|
||||
startCmd.Flags().Int(apiServerPort, constants.APIServerPort, "The apiserver listening port")
|
||||
startCmd.Flags().String(apiServerName, constants.APIServerName, "The authoritative apiserver hostname for apiserver certificates and connectivity. This can be used if you want to make the apiserver available from outside the machine")
|
||||
startCmd.Flags().StringArrayVar(&apiServerNames, "apiserver-names", nil, "A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine")
|
||||
startCmd.Flags().StringSliceVar(&apiServerNames, "apiserver-names", nil, "A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine")
|
||||
startCmd.Flags().IPSliceVar(&apiServerIPs, "apiserver-ips", nil, "A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine")
|
||||
}
|
||||
|
||||
|
|
@ -324,6 +329,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
|
|||
KubernetesConfig: config.KubernetesConfig{
|
||||
KubernetesVersion: k8sVersion,
|
||||
ClusterName: ClusterFlagValue(),
|
||||
Namespace: viper.GetString(startNamespace),
|
||||
APIServerName: viper.GetString(apiServerName),
|
||||
APIServerNames: apiServerNames,
|
||||
APIServerIPs: apiServerIPs,
|
||||
|
|
@ -545,6 +551,10 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC
|
|||
cc.KubernetesConfig.KubernetesVersion = getKubernetesVersion(existing)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(startNamespace) {
|
||||
cc.KubernetesConfig.Namespace = viper.GetString(startNamespace)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(apiServerName) {
|
||||
cc.KubernetesConfig.APIServerName = viper.GetString(apiServerName)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -44,6 +44,7 @@ import (
|
|||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/node"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
"k8s.io/minikube/pkg/minikube/out/register"
|
||||
"k8s.io/minikube/pkg/minikube/reason"
|
||||
"k8s.io/minikube/pkg/version"
|
||||
|
|
@ -53,6 +54,7 @@ var (
|
|||
statusFormat string
|
||||
output string
|
||||
layout string
|
||||
watch time.Duration
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@ -196,13 +198,27 @@ var statusCmd = &cobra.Command{
|
|||
Exit status contains the status of minikube's VM, cluster and Kubernetes encoded on it's bits in this order from right to left.
|
||||
Eg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for Kubernetes NOK)`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output = strings.ToLower(output)
|
||||
if output != "text" && statusFormat != defaultStatusFormat {
|
||||
exit.Message(reason.Usage, "Cannot use both --output and --format options")
|
||||
}
|
||||
|
||||
out.SetJSON(output == "json")
|
||||
|
||||
cname := ClusterFlagValue()
|
||||
api, cc := mustload.Partial(cname)
|
||||
|
||||
duration := watch
|
||||
if !cmd.Flags().Changed("watch") || watch < 0 {
|
||||
duration = 0
|
||||
}
|
||||
writeStatusesAtInterval(duration, api, cc)
|
||||
},
|
||||
}
|
||||
|
||||
// writeStatusesAtInterval writes statuses in a given output format - at intervals defined by duration
|
||||
func writeStatusesAtInterval(duration time.Duration, api libmachine.API, cc *config.ClusterConfig) {
|
||||
for {
|
||||
var statuses []*Status
|
||||
|
||||
if nodeName != "" || statusFormat != defaultStatusFormat && len(cc.Nodes) > 1 {
|
||||
|
|
@ -233,7 +249,7 @@ var statusCmd = &cobra.Command{
|
|||
}
|
||||
}
|
||||
|
||||
switch strings.ToLower(output) {
|
||||
switch output {
|
||||
case "text":
|
||||
for _, st := range statuses {
|
||||
if err := statusText(st, os.Stdout); err != nil {
|
||||
|
|
@ -255,8 +271,11 @@ var statusCmd = &cobra.Command{
|
|||
exit.Message(reason.Usage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output))
|
||||
}
|
||||
|
||||
os.Exit(exitCode(statuses))
|
||||
},
|
||||
if duration == 0 {
|
||||
os.Exit(exitCode(statuses))
|
||||
}
|
||||
time.Sleep(duration)
|
||||
}
|
||||
}
|
||||
|
||||
// exitCode calcluates the appropriate exit code given a set of status messages
|
||||
|
|
@ -387,6 +406,8 @@ For the list accessible variables for the template, see the struct values here:
|
|||
statusCmd.Flags().StringVarP(&layout, "layout", "l", "nodes",
|
||||
`output layout (EXPERIMENTAL, JSON only): 'nodes' or 'cluster'`)
|
||||
statusCmd.Flags().StringVarP(&nodeName, "node", "n", "", "The node to check status for. Defaults to control plane. Leave blank with default format for status on all nodes.")
|
||||
statusCmd.Flags().DurationVarP(&watch, "watch", "w", 1*time.Second, "Continuously listing/getting the status with optional interval duration.")
|
||||
statusCmd.Flags().Lookup("watch").NoOptDefVal = "1s"
|
||||
}
|
||||
|
||||
func statusText(st *Status, w io.Writer) error {
|
||||
|
|
@ -544,7 +565,7 @@ func clusterState(sts []*Status) ClusterState {
|
|||
}
|
||||
exitCode, err := strconv.Atoi(data["exitcode"])
|
||||
if err != nil {
|
||||
klog.Errorf("unable to convert exit code to int: %v", err)
|
||||
klog.Errorf("exit code not found: %v", err)
|
||||
continue
|
||||
}
|
||||
if val, ok := exitCodeToHTTPCode[exitCode]; ok {
|
||||
|
|
|
|||
|
|
@ -17,6 +17,8 @@ limitations under the License.
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/docker/machine/libmachine"
|
||||
|
|
@ -35,13 +37,15 @@ import (
|
|||
"k8s.io/minikube/pkg/minikube/out"
|
||||
"k8s.io/minikube/pkg/minikube/out/register"
|
||||
"k8s.io/minikube/pkg/minikube/reason"
|
||||
"k8s.io/minikube/pkg/minikube/schedule"
|
||||
"k8s.io/minikube/pkg/minikube/style"
|
||||
"k8s.io/minikube/pkg/util/retry"
|
||||
)
|
||||
|
||||
var (
|
||||
stopAll bool
|
||||
keepActive bool
|
||||
stopAll bool
|
||||
keepActive bool
|
||||
scheduledStopDuration time.Duration
|
||||
)
|
||||
|
||||
// stopCmd represents the stop command
|
||||
|
|
@ -55,6 +59,11 @@ var stopCmd = &cobra.Command{
|
|||
func init() {
|
||||
stopCmd.Flags().BoolVar(&stopAll, "all", false, "Set flag to stop all profiles (clusters)")
|
||||
stopCmd.Flags().BoolVar(&keepActive, "keep-context-active", false, "keep the kube-context active after cluster is stopped. Defaults to false.")
|
||||
stopCmd.Flags().DurationVar(&scheduledStopDuration, "schedule", 0*time.Second, "Set flag to stop cluster after a set amount of time (e.g. --schedule=5m)")
|
||||
if err := stopCmd.Flags().MarkHidden("schedule"); err != nil {
|
||||
klog.Info("unable to mark --schedule flag as hidden")
|
||||
}
|
||||
stopCmd.Flags().StringVarP(&outputFormat, "output", "o", "text", "Format to print stdout in. Options include: [text,json]")
|
||||
|
||||
if err := viper.GetViper().BindPFlags(stopCmd.Flags()); err != nil {
|
||||
exit.Error(reason.InternalFlagsBind, "unable to bind flags", err)
|
||||
|
|
@ -63,9 +72,14 @@ func init() {
|
|||
|
||||
// runStop handles the executes the flow of "minikube stop"
|
||||
func runStop(cmd *cobra.Command, args []string) {
|
||||
register.SetEventLogPath(localpath.EventLog(ClusterFlagValue()))
|
||||
out.SetJSON(outputFormat == "json")
|
||||
register.Reg.SetStep(register.Stopping)
|
||||
|
||||
// check if profile path exists, if no PathError log file exists for valid profile
|
||||
if _, err := os.Stat(localpath.Profile(ClusterFlagValue())); err == nil {
|
||||
register.SetEventLogPath(localpath.EventLog(ClusterFlagValue()))
|
||||
}
|
||||
|
||||
// new code
|
||||
var profilesToStop []string
|
||||
if stopAll {
|
||||
|
|
@ -81,6 +95,20 @@ func runStop(cmd *cobra.Command, args []string) {
|
|||
profilesToStop = append(profilesToStop, cname)
|
||||
}
|
||||
|
||||
// Kill any existing scheduled stops
|
||||
schedule.KillExisting(profilesToStop)
|
||||
|
||||
if scheduledStopDuration != 0 {
|
||||
if runtime.GOOS == "windows" {
|
||||
exit.Message(reason.Usage, "the --schedule flag is currently not supported on windows")
|
||||
}
|
||||
if err := schedule.Daemonize(profilesToStop, scheduledStopDuration); err != nil {
|
||||
exit.Message(reason.DaemonizeError, "unable to daemonize: {{.err}}", out.V{"err": err.Error()})
|
||||
}
|
||||
klog.Infof("sleeping %s before completing stop...", scheduledStopDuration.String())
|
||||
time.Sleep(scheduledStopDuration)
|
||||
}
|
||||
|
||||
stoppedNodes := 0
|
||||
for _, profile := range profilesToStop {
|
||||
stoppedNodes = stopProfile(profile)
|
||||
|
|
|
|||
|
|
@ -46,6 +46,7 @@ var unpauseCmd = &cobra.Command{
|
|||
register.SetEventLogPath(localpath.EventLog(cname))
|
||||
|
||||
co := mustload.Running(cname)
|
||||
out.SetJSON(outputFormat == "json")
|
||||
register.Reg.SetStep(register.Unpausing)
|
||||
|
||||
klog.Infof("namespaces: %v keys: %v", namespaces, viper.AllSettings())
|
||||
|
|
@ -106,4 +107,5 @@ var unpauseCmd = &cobra.Command{
|
|||
func init() {
|
||||
unpauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", constants.DefaultNamespaces, "namespaces to unpause")
|
||||
unpauseCmd.Flags().BoolVarP(&allNamespaces, "all-namespaces", "A", false, "If set, unpause all namespaces")
|
||||
unpauseCmd.Flags().StringVarP(&outputFormat, "output", "o", "text", "Format to print stdout in. Options include: [text,json]")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,17 +18,15 @@ package main
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
||||
// initflag must be imported before any other minikube pkg.
|
||||
// Fix for https://github.com/kubernetes/minikube/issues/4866
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"k8s.io/klog/v2"
|
||||
_ "k8s.io/minikube/pkg/initflag"
|
||||
|
||||
// Register drivers
|
||||
_ "k8s.io/minikube/pkg/minikube/registry/drvs"
|
||||
|
|
@ -61,6 +59,8 @@ func main() {
|
|||
bridgeLogMessages()
|
||||
defer klog.Flush()
|
||||
|
||||
setFlags()
|
||||
|
||||
s := stacklog.MustStartFromEnv("STACKLOG_PATH")
|
||||
defer s.Stop()
|
||||
|
||||
|
|
@ -120,3 +120,35 @@ func (lb machineLogBridge) Write(b []byte) (n int, err error) {
|
|||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
// setFlags sets the flags
|
||||
func setFlags() {
|
||||
// parse flags beyond subcommand - get aroung go flag 'limitations':
|
||||
// "Flag parsing stops just before the first non-flag argument" (ref: https://pkg.go.dev/flag#hdr-Command_line_flag_syntax)
|
||||
pflag.CommandLine.ParseErrorsWhitelist.UnknownFlags = true
|
||||
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
|
||||
// avoid 'pflag: help requested' error, as help will be defined later by cobra cmd.Execute()
|
||||
pflag.BoolP("help", "h", false, "")
|
||||
pflag.Parse()
|
||||
|
||||
// set default flag value for logtostderr and alsologtostderr but don't override user's preferences
|
||||
if !pflag.CommandLine.Changed("logtostderr") {
|
||||
if err := pflag.Set("logtostderr", "false"); err != nil {
|
||||
klog.Warningf("Unable to set default flag value for logtostderr: %v", err)
|
||||
}
|
||||
}
|
||||
if !pflag.CommandLine.Changed("alsologtostderr") {
|
||||
if err := pflag.Set("alsologtostderr", "false"); err != nil {
|
||||
klog.Warningf("Unable to set default flag value for alsologtostderr: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// make sure log_dir exists if log_file is not also set - the log_dir is mutually exclusive with the log_file option
|
||||
// ref: https://github.com/kubernetes/klog/blob/52c62e3b70a9a46101f33ebaf0b100ec55099975/klog.go#L491
|
||||
if pflag.Lookup("log_file") != nil && pflag.Lookup("log_file").Value.String() == "" &&
|
||||
pflag.Lookup("log_dir") != nil && pflag.Lookup("log_dir").Value.String() != "" {
|
||||
if err := os.MkdirAll(pflag.Lookup("log_dir").Value.String(), 0755); err != nil {
|
||||
klog.Warningf("unable to create log directory: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -36,7 +36,7 @@ spec:
|
|||
spec:
|
||||
containers:
|
||||
- name: dashboard-metrics-scraper
|
||||
image: kubernetesui/metrics-scraper:v1.0.4
|
||||
image: {{default "kubernetesui" .ImageRepository}}/metrics-scraper:v1.0.4
|
||||
ports:
|
||||
- containerPort: 8000
|
||||
protocol: TCP
|
||||
|
|
@ -91,7 +91,7 @@ spec:
|
|||
containers:
|
||||
- name: kubernetes-dashboard
|
||||
# WARNING: This must match pkg/minikube/bootstrapper/images/images.go
|
||||
image: kubernetesui/dashboard:v2.0.3
|
||||
image: {{default "kubernetesui" .ImageRepository}}/dashboard:v2.0.3
|
||||
ports:
|
||||
- containerPort: 9090
|
||||
protocol: TCP
|
||||
|
|
@ -62,7 +62,7 @@ spec:
|
|||
- name: ES_JAVA_OPTS
|
||||
value: "-Xms1024m -Xmx1024m"
|
||||
initContainers:
|
||||
- image: registry.hub.docker.com/library/alpine:3.6
|
||||
- image: {{default "registry.hub.docker.com/library" .ImageRepository}}/alpine:3.6
|
||||
command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
|
||||
name: elasticsearch-logging-init
|
||||
securityContext:
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ spec:
|
|||
spec:
|
||||
containers:
|
||||
- name: kibana-logging
|
||||
image: docker.elastic.co/kibana/kibana:5.6.2
|
||||
image: {{default "docker.elastic.co/kibana" .ImageRepository}}/kibana:5.6.2
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
|
|
|
|||
|
|
@ -54,7 +54,7 @@ spec:
|
|||
serviceAccountName: minikube-gcp-auth-certs
|
||||
containers:
|
||||
- name: create
|
||||
image: jettech/kube-webhook-certgen:v1.3.0
|
||||
image: {{default "jettech" .ImageRepository}}/kube-webhook-certgen:v1.3.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- create
|
||||
|
|
@ -80,7 +80,7 @@ spec:
|
|||
spec:
|
||||
containers:
|
||||
- name: gcp-auth
|
||||
image: gcr.io/k8s-minikube/gcp-auth-webhook:v0.0.3
|
||||
image: {{default "gcr.io/k8s-minikube" .ImageRepository}}/gcp-auth-webhook:v0.0.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
|
|
@ -113,7 +113,7 @@ spec:
|
|||
serviceAccountName: minikube-gcp-auth-certs
|
||||
containers:
|
||||
- name: patch
|
||||
image: jettech/kube-webhook-certgen:v1.3.0
|
||||
image: {{default "jettech" .ImageRepository}}/kube-webhook-certgen:v1.3.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- patch
|
||||
|
|
@ -50,7 +50,7 @@ spec:
|
|||
hostPath:
|
||||
path: /
|
||||
initContainers:
|
||||
- image: {{default "k8s.gcr.io" .ImageRepository}}/minikube-nvidia-driver-installer@sha256:492d46f2bc768d6610ec5940b6c3c33c75e03e201cc8786e04cc488659fd6342
|
||||
- image: {{default "k8s.gcr.io" .ImageRepository}}/minikube-nvidia-driver-installer
|
||||
name: nvidia-driver-installer
|
||||
resources:
|
||||
requests:
|
||||
|
|
|
|||
|
|
@ -43,7 +43,7 @@ spec:
|
|||
hostPath:
|
||||
path: /var/lib/kubelet/device-plugins
|
||||
containers:
|
||||
- image: "nvidia/k8s-device-plugin:1.0.0-beta4"
|
||||
- image: {{default "nvidia" .ImageRepository}}/k8s-device-plugin:1.0.0-beta4
|
||||
command: ["/usr/bin/nvidia-device-plugin", "-logtostderr"]
|
||||
name: nvidia-gpu-device-plugin
|
||||
resources:
|
||||
|
|
@ -46,7 +46,7 @@ spec:
|
|||
value: kube-system
|
||||
- name: TILLER_HISTORY_MAX
|
||||
value: "0"
|
||||
image: gcr.io/kubernetes-helm/tiller:v2.16.12
|
||||
image: {{default "gcr.io/kubernetes-helm" .ImageRepository}}/tiller:v2.16.12
|
||||
imagePullPolicy: IfNotPresent
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
|
|
|
|||
|
|
@ -80,7 +80,7 @@ spec:
|
|||
hostNetwork: true
|
||||
containers:
|
||||
- name: minikube-ingress-dns
|
||||
image: "cryptexlabs/minikube-ingress-dns:0.3.0"
|
||||
image: {{default "cryptexlabs" .ImageRepository}}/minikube-ingress-dns:0.3.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 53
|
||||
|
|
@ -49,7 +49,7 @@ spec:
|
|||
serviceAccountName: ingress-nginx
|
||||
containers:
|
||||
- name: controller
|
||||
image: us.gcr.io/k8s-artifacts-prod/ingress-nginx/controller:v0.34.1@sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
|
||||
image: {{default "us.gcr.io/k8s-artifacts-prod/ingress-nginx" .ImageRepository}}/controller:v0.40.2
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle:
|
||||
preStop:
|
||||
|
|
@ -141,21 +141,25 @@ webhooks:
|
|||
- name: validate.nginx.ingress.kubernetes.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- extensions
|
||||
- networking.k8s.io
|
||||
apiVersions:
|
||||
- v1beta1
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- ingresses
|
||||
failurePolicy: Fail
|
||||
sideEffects: None
|
||||
admissionReviewVersions:
|
||||
- v1
|
||||
- v1beta1
|
||||
clientConfig:
|
||||
service:
|
||||
namespace: kube-system
|
||||
name: ingress-nginx-controller-admission
|
||||
path: /extensions/v1beta1/ingresses
|
||||
path: /networking/v1beta1/ingresses
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
|
|
@ -213,7 +217,7 @@ spec:
|
|||
spec:
|
||||
containers:
|
||||
- name: create
|
||||
image: jettech/kube-webhook-certgen:v1.2.2
|
||||
image: {{default "jettech" .ImageRepository}}/kube-webhook-certgen:v1.2.2
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- create
|
||||
|
|
@ -248,7 +252,7 @@ spec:
|
|||
spec:
|
||||
containers:
|
||||
- name: patch
|
||||
image: jettech/kube-webhook-certgen:v1.2.2
|
||||
image: {{default "jettech" .ImageRepository}}/kube-webhook-certgen:v1.3.0
|
||||
imagePullPolicy:
|
||||
args:
|
||||
- patch
|
||||
|
|
|
|||
|
|
@ -42,7 +42,7 @@ spec:
|
|||
containers:
|
||||
- name: logviewer
|
||||
imagePullPolicy: Always
|
||||
image: docker.io/ivans3/minikube-log-viewer:latest
|
||||
image: {{default "docker.io/ivans3" .ImageRepository}}/minikube-log-viewer:latest
|
||||
volumeMounts:
|
||||
- name: logs
|
||||
mountPath: /var/log/containers/
|
||||
|
|
|
|||
|
|
@ -212,7 +212,7 @@ spec:
|
|||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.hostIP
|
||||
image: metallb/speaker:v0.8.2
|
||||
image: {{default "metallb" .ImageRepository}}/speaker:v0.8.2
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: speaker
|
||||
ports:
|
||||
|
|
@ -268,7 +268,7 @@ spec:
|
|||
- args:
|
||||
- --port=7472
|
||||
- --config=config
|
||||
image: metallb/controller:v0.8.2
|
||||
image: {{default "metallb" .ImageRepository}}/controller:v0.8.2
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: controller
|
||||
ports:
|
||||
|
|
@ -17,7 +17,7 @@ spec:
|
|||
spec:
|
||||
initContainers:
|
||||
- name: update
|
||||
image: alpine:3.11
|
||||
image: {{default "registry.hub.docker.com/library" .ImageRepository}}/alpine:3.11
|
||||
volumeMounts:
|
||||
- name: etchosts
|
||||
mountPath: /host-etc/hosts
|
||||
|
|
@ -43,7 +43,7 @@ spec:
|
|||
echo "Done."
|
||||
containers:
|
||||
- name: pause-for-update
|
||||
image: gcr.io/google_containers/pause-amd64:3.1
|
||||
image: {{default "gcr.io/google_containers" .ImageRepository}}/pause-amd64:3.1
|
||||
terminationGracePeriodSeconds: 30
|
||||
volumes:
|
||||
- name: etchosts
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@ spec:
|
|||
path: /var/lib/minikube/binaries
|
||||
containers:
|
||||
- name: core-dns-patcher
|
||||
image: quay.io/rhdevelopers/core-dns-patcher
|
||||
image: {{default "quay.io/rhdevelopers" .ImageRepository}}/core-dns-patcher
|
||||
imagePullPolicy: IfNotPresent
|
||||
# using the kubectl from the minikube instance
|
||||
volumeMounts:
|
||||
|
|
|
|||
|
|
@ -13,6 +13,7 @@ data:
|
|||
example.com
|
||||
test.com
|
||||
test.org
|
||||
registry.minikube
|
||||
# default registry address in minikube when enabled via minikube addons enable registry
|
||||
registrySvc: registry.kube-system.svc.cluster.local
|
||||
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ spec:
|
|||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
spec:
|
||||
containers:
|
||||
- image: upmcenterprises/registry-creds:1.10
|
||||
- image: {{default "upmcenterprises" .ImageRepository}}/registry-creds:1.10
|
||||
name: registry-creds
|
||||
imagePullPolicy: Always
|
||||
env:
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ spec:
|
|||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/google_containers/kube-registry-proxy:0.4
|
||||
- image: {{default "gcr.io/google_containers" .ImageRepository}}/kube-registry-proxy:0.4
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: registry-proxy
|
||||
ports:
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ spec:
|
|||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
spec:
|
||||
containers:
|
||||
- image: registry.hub.docker.com/library/registry:2.7.1
|
||||
- image: {{default "registry.hub.docker.com/library" .ImageRepository}}/registry:2.7.1
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: registry
|
||||
ports:
|
||||
|
|
|
|||
|
|
@ -31,7 +31,7 @@ spec:
|
|||
# kubernetes.io/hostname: minikube
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- image: quay.io/nixpanic/glusterfs-server:pr_fake-disk
|
||||
- image: {{default "quay.io/nixpanic" .ImageRepository}}/glusterfs-server:pr_fake-disk
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: glusterfs
|
||||
env:
|
||||
|
|
|
|||
|
|
@ -116,7 +116,7 @@ spec:
|
|||
spec:
|
||||
serviceAccountName: heketi-service-account
|
||||
containers:
|
||||
- image: heketi/heketi:latest
|
||||
- image: {{default "heketi" .ImageRepository}}/heketi:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: heketi
|
||||
env:
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ spec:
|
|||
containers:
|
||||
- name: volume-snapshot-controller
|
||||
# TODO(xyang): Replace with an official image when it is released
|
||||
image: gcr.io/k8s-staging-csi/snapshot-controller:v2.0.0-rc2
|
||||
image: {{default "gcr.io/k8s-staging-csi" .ImageRepository}}/snapshot-controller:v2.0.0-rc2
|
||||
args:
|
||||
- "--v=5"
|
||||
imagePullPolicy: Always
|
||||
|
|
@ -313,7 +313,6 @@ CONFIG_VMWARE_BALLOON=m
|
|||
CONFIG_VMWARE_VMCI=m
|
||||
CONFIG_BLK_DEV_SD=y
|
||||
CONFIG_BLK_DEV_SR=y
|
||||
CONFIG_BLK_DEV_SR_VENDOR=y
|
||||
CONFIG_CHR_DEV_SG=y
|
||||
CONFIG_SCSI_CONSTANTS=y
|
||||
CONFIG_SCSI_SPI_ATTRS=y
|
||||
|
|
@ -408,7 +407,6 @@ CONFIG_FB_MODE_HELPERS=y
|
|||
CONFIG_FB_TILEBLITTING=y
|
||||
CONFIG_FB_EFI=y
|
||||
# CONFIG_LCD_CLASS_DEVICE is not set
|
||||
CONFIG_VGACON_SOFT_SCROLLBACK=y
|
||||
CONFIG_LOGO=y
|
||||
# CONFIG_LOGO_LINUX_MONO is not set
|
||||
# CONFIG_LOGO_LINUX_VGA16 is not set
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ BR2_GLOBAL_PATCH_DIR="$(BR2_EXTERNAL_MINIKUBE_PATH)/board/coreos/minikube/patche
|
|||
BR2_LINUX_KERNEL=y
|
||||
BR2_LINUX_KERNEL_LATEST_VERSION=n
|
||||
BR2_LINUX_KERNEL_CUSTOM_VERSION=y
|
||||
BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE="4.19.114"
|
||||
BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE="4.19.150"
|
||||
BR2_LINUX_KERNEL_BZIMAGE=y
|
||||
BR2_LINUX_KERNEL_LZ4=y
|
||||
BR2_LINUX_KERNEL_USE_CUSTOM_CONFIG=y
|
||||
|
|
|
|||
|
|
@ -17,3 +17,4 @@ sha256 865ded95aceb3a33a391b252522682de6b37b39498704c490b3a321dbefaafcb v1.18.0.
|
|||
sha256 794ddc36c2a20fde91fc6cc2c6f02ebdaea85c69b51b67f3994090dbbdbc2a50 v1.18.1.tar.gz
|
||||
sha256 25dc558fbabc987bd58c7eab5230121b258a7b0eb34a49dc6595f1c6f3969116 v1.18.2.tar.gz
|
||||
sha256 d5c6442e3990938badc966cdd1eb9ebe2fc11345452c233aa0d87ca38fbeed81 v1.18.3.tar.gz
|
||||
sha256 74a4e916acddc6cf47ab5752bdebb6732ce2c028505ef57b7edc21d2da9039b6 v1.18.4.tar.gz
|
||||
|
|
|
|||
|
|
@ -4,8 +4,8 @@
|
|||
#
|
||||
################################################################################
|
||||
|
||||
CRIO_BIN_VERSION = v1.18.3
|
||||
CRIO_BIN_COMMIT = 61de18161fb4ccda720768c001713592b5a04e46
|
||||
CRIO_BIN_VERSION = v1.18.4
|
||||
CRIO_BIN_COMMIT = aba91e59ec78e3299e443a7364e2cf8909af4606
|
||||
CRIO_BIN_SITE = https://github.com/cri-o/cri-o/archive
|
||||
CRIO_BIN_SOURCE = $(CRIO_BIN_VERSION).tar.gz
|
||||
CRIO_BIN_DEPENDENCIES = host-go libgpgme
|
||||
|
|
|
|||
|
|
@ -107,16 +107,16 @@ RUN sh -c "echo 'deb https://download.docker.com/linux/ubuntu focal stable' > /e
|
|||
clean-install docker-ce docker-ce-cli containerd.io
|
||||
|
||||
# Install cri-o/podman dependencies:
|
||||
RUN sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list" && \
|
||||
RUN sh -c "echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list" && \
|
||||
curl -LO https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/xUbuntu_20.04/Release.key && \
|
||||
apt-key add - < Release.key && \
|
||||
clean-install containers-common catatonit conmon containernetworking-plugins cri-tools podman-plugins varlink
|
||||
|
||||
# install cri-o based on https://github.com/cri-o/cri-o/commit/96b0c34b31a9fc181e46d7d8e34fb8ee6c4dc4e1#diff-04c6e90faac2675aa89e2176d2eec7d8R128
|
||||
RUN sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/1.18:/1.18.3/xUbuntu_20.04/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list" && \
|
||||
curl -LO https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/1.18:/1.18.3/xUbuntu_20.04/Release.key && \
|
||||
# install cri-o based on https://github.com/cri-o/cri-o/blob/release-1.18/README.md#installing-cri-o
|
||||
RUN sh -c "echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/1.18/xUbuntu_20.04/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:1.18.list" && \
|
||||
curl -LO https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/1.18/xUbuntu_20.04/Release.key && \
|
||||
apt-key add - < Release.key && \
|
||||
clean-install cri-o=1.18.3~3
|
||||
clean-install cri-o cri-o-runc
|
||||
|
||||
# install podman
|
||||
RUN sh -c "echo 'deb https://dl.bintray.com/afbjorklund/podman focal main' > /etc/apt/sources.list.d/podman.list" && \
|
||||
|
|
@ -124,8 +124,6 @@ RUN sh -c "echo 'deb https://dl.bintray.com/afbjorklund/podman focal main' > /et
|
|||
apt-key add - < afbjorklund-public.key.asc && \
|
||||
clean-install podman=1.9.3~1
|
||||
|
||||
RUN mkdir -p /usr/lib/cri-o-runc/sbin && cp /usr/bin/runc /usr/lib/cri-o-runc/sbin/runc
|
||||
|
||||
# automount service
|
||||
COPY automount/minikube-automount /usr/sbin/minikube-automount
|
||||
COPY automount/minikube-automount.service /usr/lib/systemd/system/minikube-automount.service
|
||||
|
|
|
|||
|
|
@ -80,9 +80,17 @@ fix_cgroup_mounts() {
|
|||
# environment by doing another bind mount for each subsystem.
|
||||
local cgroup_mounts
|
||||
|
||||
# NOTE: This extracts fields 4 and on
|
||||
# This regexp finds all /sys/fs/cgroup mounts that are cgroupfs and mounted somewhere other than / - extracting fields 4+
|
||||
# See https://man7.org/linux/man-pages/man5/proc.5.html for field names
|
||||
cgroup_mounts=$(egrep -o '(/docker|libpod_parent|/kubepods).*/sys/fs/cgroup.*' /proc/self/mountinfo || true)
|
||||
|
||||
# Example inputs:
|
||||
#
|
||||
# Docker: /docker/562a56986a84b3cd38d6a32ac43fdfcc8ad4d2473acf2839cbf549273f35c206 /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:143 master:23 - cgroup devices rw,devices
|
||||
# podman: /libpod_parent/libpod-73a4fb9769188ae5dc51cb7e24b9f2752a4af7b802a8949f06a7b2f2363ab0e9 ...
|
||||
# Cloud Shell: /kubepods/besteffort/pod3d6beaa3004913efb68ce073d73494b0/accdf94879f0a494f317e9a0517f23cdd18b35ff9439efd0175f17bbc56877c4 /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime master:19 - cgroup cgroup rw,memory
|
||||
# GitHub actions #9304: /actions_job/0924fbbcf7b18d2a00c171482b4600747afc367a9dfbeac9d6b14b35cda80399 /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:263 master:24 - cgroup cgroup rw,memory
|
||||
|
||||
cgroup_mounts=$(grep -E -o '/[[:alnum:]].* /sys/fs/cgroup.*.*cgroup' /proc/self/mountinfo || true)
|
||||
|
||||
if [[ -n "${cgroup_mounts}" ]]; then
|
||||
local mount_root
|
||||
|
|
|
|||
|
|
@ -0,0 +1,85 @@
|
|||
# Tracing minikube
|
||||
|
||||
* First proposed: Oct 30 2020
|
||||
* Authors: Priya Wadhwa (priyawadhwa@)
|
||||
|
||||
## Reviewer Priorities
|
||||
|
||||
Please review this proposal with the following priorities:
|
||||
|
||||
* Does this fit with minikube's [principles](https://minikube.sigs.k8s.io/docs/concepts/principles/)?
|
||||
* Are there other approaches to consider?
|
||||
* Could the implementation be made simpler?
|
||||
* Are there usability, reliability, or technical debt concerns?
|
||||
|
||||
Please leave the above text in your proposal as instructions to the reader.
|
||||
|
||||
## Summary
|
||||
|
||||
This proposal covers using the [OpenTelemetry](https://github.com/open-telemetry/opentelemetry-go) API to provide tracing data for minikube.
|
||||
This data would be useful for maintainers to identify areas for performance improvements.
|
||||
This data would also be used to create a dashboard of current performance and would allow us to catch performance regressions more quickly.
|
||||
|
||||
## Goals
|
||||
|
||||
* Trace data is can be collected and exported for `minikube start`
|
||||
* `minikube start` can either create a new Trace or can read from a file and append data to an existing Trace
|
||||
* It is easy for users to add their own exporters if they wish to export data to their own service
|
||||
* We are able to create dashboards around `minikube start` performance that will alert maintainers if regressions happen
|
||||
|
||||
## Non-Goals
|
||||
|
||||
* Collecting trace data for the minikube cluster while it is running
|
||||
|
||||
## Design Details
|
||||
|
||||
There are two pieces to the design: collecting the data and exporting the data.
|
||||
|
||||
### Collecting Data
|
||||
Luckily, we already have a lot of this infrastructure set up for JSON output.
|
||||
We know when a new substep of `minikube start` has started, because we call it explictly via `register.SetStep`.
|
||||
We also know that substep has ended when a new substep begins.
|
||||
|
||||
We can start new spans whenever `register.SetStep` is called, and thus collect tracing data.
|
||||
|
||||
### Exporting Data
|
||||
OpenTelemetry supports a variety of [user-contributed exporters](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/master/instrumentation).
|
||||
It would be a lot of work to implement all of them ourselves.
|
||||
|
||||
Instead, I propose writing a simple `GetExporter` function that would return whatever exporter is requested via a `--trace` flag.
|
||||
|
||||
So, something like this would tell minikube to use the `stackdriver` exporter:
|
||||
|
||||
```
|
||||
minikube start --trace=stackdriver
|
||||
```
|
||||
|
||||
Users can then contribute to minikube if they need to use an exporter that isn't currently provided.
|
||||
|
||||
Exporters also will require additional information to make sure data is sent to the correct place.
|
||||
This could include things like, but not limited to:
|
||||
* project ID
|
||||
* zone
|
||||
|
||||
Since it could get messy passing in these things as flags to `minikube start`, I propose that these values are set via environment variable.
|
||||
All environment variables will be of the form:
|
||||
|
||||
```
|
||||
MINIKUBE_TRACE_PROJECT_ID
|
||||
```
|
||||
and the user-contributed code is responsible for parsing the environment variables correctly and returning the exporter.
|
||||
|
||||
### Testing Plan
|
||||
|
||||
I will set up a dashboard and alerting system in the minikube GCP project.
|
||||
If we are collecting data at a consistent rate, and the dashboard is populated, we will know that this has worked.
|
||||
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
### Building a Wrapper Binary
|
||||
A wrapper binary could run `minikube start --output json` and collect the same data, and then export it to whatever service we need.
|
||||
|
||||
A large advantage of this is that the minikube code doesn't have to be changed at all for this to work.
|
||||
|
||||
However, I decided against this in case other tools that consume minikube or users want to collect this data as well -- it is much easier to pass in a flag to minikube than to download another binary.
|
||||
4
go.mod
4
go.mod
|
|
@ -8,6 +8,7 @@ require (
|
|||
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 // indirect
|
||||
github.com/Parallels/docker-machine-parallels/v2 v2.0.1
|
||||
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
|
||||
github.com/VividCortex/godaemon v0.0.0-20201030160542-15e3f4925a21
|
||||
github.com/blang/semver v3.5.0+incompatible
|
||||
github.com/c4milo/gotoolkit v0.0.0-20170318115440-bcc06269efa9 // indirect
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible
|
||||
|
|
@ -24,6 +25,7 @@ require (
|
|||
github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f
|
||||
github.com/elazarl/goproxy/ext v0.0.0-20190421051319-9d40249d3c2f // indirect
|
||||
github.com/evanphx/json-patch v4.5.0+incompatible // indirect
|
||||
github.com/go-logr/logr v0.3.0 // indirect
|
||||
github.com/go-ole/go-ole v1.2.4 // indirect
|
||||
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3
|
||||
github.com/google/go-cmp v0.5.2
|
||||
|
|
@ -87,7 +89,7 @@ require (
|
|||
k8s.io/api v0.17.4
|
||||
k8s.io/apimachinery v0.17.4
|
||||
k8s.io/client-go v0.17.4
|
||||
k8s.io/klog/v2 v2.3.0
|
||||
k8s.io/klog/v2 v2.4.0
|
||||
k8s.io/kubectl v0.0.0
|
||||
k8s.io/kubernetes v1.18.5
|
||||
sigs.k8s.io/sig-storage-lib-external-provisioner v4.0.0+incompatible // indirect
|
||||
|
|
|
|||
8
go.sum
8
go.sum
|
|
@ -118,6 +118,8 @@ github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUW
|
|||
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
|
||||
github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
|
||||
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
|
||||
github.com/VividCortex/godaemon v0.0.0-20201030160542-15e3f4925a21 h1:Pgxfz/g+XyfRjYqRjKUFpDh5IciFncmA/Uio6AU/z9g=
|
||||
github.com/VividCortex/godaemon v0.0.0-20201030160542-15e3f4925a21/go.mod h1:Y8CJ3IwPIAkMhv/rRUWIlczaeqd9ty9yrl+nc2AbaL4=
|
||||
github.com/afbjorklund/go-containerregistry v0.0.0-20200902152226-fbad78ec2813 h1:0tskN1ipU/BBrpoEIy0rdZS9jf5+wdP6IMRak8Iu/YE=
|
||||
github.com/afbjorklund/go-containerregistry v0.0.0-20200902152226-fbad78ec2813/go.mod h1:npTSyywOeILcgWqd+rvtzGWflIPPcBQhYoOONaY4ltM=
|
||||
github.com/afbjorklund/go-getter v1.4.1-0.20201020145846-c0da14b4bffe h1:TdcuDqk4ArmYI8cbeeL/RM5BPciDOaWpGZoPoT3OziQ=
|
||||
|
|
@ -358,6 +360,8 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
|
|||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
|
||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs=
|
||||
github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
|
||||
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
|
||||
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
|
||||
|
|
@ -1651,8 +1655,8 @@ k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUc
|
|||
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
|
||||
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||
k8s.io/klog/v2 v2.3.0 h1:WmkrnW7fdrm0/DMClc+HIxtftvxVIPAhlVwMQo5yLco=
|
||||
k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
|
||||
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/kube-aggregator v0.17.3/go.mod h1:1dMwMFQbmH76RKF0614L7dNenMl3dwnUJuOOyZ3GMXA=
|
||||
k8s.io/kube-controller-manager v0.17.3/go.mod h1:22B/TsgVviuCVuNwUrqgyTi5D4AYjMFaK9c8h1oonkY=
|
||||
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU=
|
||||
|
|
|
|||
|
|
@ -30,9 +30,15 @@ export GOPATH="$HOME/go"
|
|||
export KUBECONFIG="${TEST_HOME}/kubeconfig"
|
||||
export PATH=$PATH:"/usr/local/bin/:/usr/local/go/bin/:$GOPATH/bin"
|
||||
|
||||
# install lsof for finding none driver procs, psmisc to use pstree in cronjobs
|
||||
sudo apt-get -y install lsof psmisc
|
||||
|
||||
# installing golang so we could do go get for gopogh
|
||||
sudo ./installers/check_install_golang.sh "1.15.2" "/usr/local" || true
|
||||
|
||||
# install docker and kubectl if not present
|
||||
sudo ./installers/check_install_docker.sh
|
||||
|
||||
docker rm -f -v $(docker ps -aq) >/dev/null 2>&1 || true
|
||||
docker volume prune -f || true
|
||||
docker system df || true
|
||||
|
|
@ -47,7 +53,7 @@ echo "test home: ${TEST_HOME}"
|
|||
echo "sudo: ${SUDO_PREFIX}"
|
||||
echo "kernel: $(uname -v)"
|
||||
echo "uptime: $(uptime)"
|
||||
# Setting KUBECONFIG prevents the version ceck from erroring out due to permission issues
|
||||
# Setting KUBECONFIG prevents the version check from erroring out due to permission issues
|
||||
echo "kubectl: $(env KUBECONFIG=${TEST_HOME} kubectl version --client --short=true)"
|
||||
echo "docker: $(docker version --format '{{ .Client.Version }}')"
|
||||
echo "podman: $(sudo podman version --format '{{.Version}}' || true)"
|
||||
|
|
@ -211,6 +217,7 @@ if [[ "${kprocs}" != "" ]]; then
|
|||
sudo -E kill ${kprocs} || true
|
||||
fi
|
||||
|
||||
|
||||
# clean up none drivers binding on 8443
|
||||
none_procs=$(sudo lsof -i :8443 | tail -n +2 | awk '{print $2}' || true)
|
||||
if [[ "${none_procs}" != "" ]]; then
|
||||
|
|
|
|||
|
|
@ -0,0 +1,35 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -eux -o pipefail
|
||||
|
||||
if [[ "$OSTYPE" == "darwin"* ]]; then
|
||||
echo "detected darwin, exiting"
|
||||
return
|
||||
fi
|
||||
|
||||
echo "Installing latest docker"
|
||||
sudo apt-get -y update
|
||||
sudo apt-get -y install apt-transport-https ca-certificates curl gnupg-agent software-properties-common
|
||||
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add -
|
||||
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian `lsb_release -cs` stable"
|
||||
sudo apt-get -y update
|
||||
sudo apt-get -y install docker-ce docker-ce-cli containerd.io
|
||||
sudo usermod -aG docker jenkins
|
||||
|
||||
echo "Installing latest kubectl"
|
||||
curl -LO "https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl"
|
||||
sudo install ./kubectl /usr/local/bin/kubectl
|
||||
|
|
@ -18,7 +18,7 @@ set -eux -o pipefail
|
|||
|
||||
if (($# < 2)); then
|
||||
echo "ERROR: given ! ($#) number of parameters but expect 2."
|
||||
echo "USAGE: ./check_and_install_golang.sh VERSION_TO_INSTALL INSTALL_PATH"
|
||||
echo "USAGE: ./check_install_golang.sh VERSION_TO_INSTALL INSTALL_PATH"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
|
|
|||
|
|
@ -36,4 +36,8 @@ EXTRA_TEST_ARGS="-gvisor"
|
|||
mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
|
||||
sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP"
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get -y install qemu-system libvirt-clients libvirt-daemon-system ebtables iptables dnsmasq
|
||||
sudo adduser jenkins libvirt || true
|
||||
|
||||
source ./common.sh
|
||||
|
|
|
|||
|
|
@ -1,463 +0,0 @@
|
|||
/*
|
||||
Copyright 2020 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
The script expects the following env variables:
|
||||
- UPDATE_TARGET=<string>: optional - if unset/absent, default option is "fs"; valid options are:
|
||||
- "fs" - update only local filesystem repo files [default]
|
||||
- "gh" - update only remote GitHub repo files and create PR (if one does not exist already)
|
||||
- "all" - update local and remote repo files and create PR (if one does not exist already)
|
||||
- GITHUB_TOKEN=<string>: The Github API access token. Injected by the Jenkins credential provider.
|
||||
- note: GITHUB_TOKEN is needed only if UPDATE_TARGET is "gh" or "all"
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
|
||||
"github.com/google/go-github/v32/github"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
// default context timeout
|
||||
cxTimeout = 300 * time.Second
|
||||
|
||||
// use max value (100) for PerPage to avoid hitting the rate limits (60 per hour, 10 per minute)
|
||||
// see https://godoc.org/github.com/google/go-github/github#hdr-Rate_Limiting
|
||||
ghListOptionsPerPage = 100
|
||||
)
|
||||
|
||||
var (
|
||||
// root directory of the local filesystem repo to update
|
||||
fsRoot = "../../"
|
||||
|
||||
// map key corresponds to GitHub TreeEntry.Path and local repo file path (prefixed with fsRoot)
|
||||
plan = map[string]Patch{
|
||||
"pkg/minikube/constants/constants.go": {
|
||||
Replace: map[string]string{
|
||||
`DefaultKubernetesVersion = \".*`: `DefaultKubernetesVersion = "{{.K8sStableVersion}}"`,
|
||||
`NewestKubernetesVersion = \".*`: `NewestKubernetesVersion = "{{.K8sLatestVersion}}"`,
|
||||
},
|
||||
},
|
||||
"site/content/en/docs/commands/start.md": {
|
||||
Replace: map[string]string{
|
||||
`'stable' for .*,`: `'stable' for {{.K8sStableVersion}},`,
|
||||
`'latest' for .*\)`: `'latest' for {{.K8sLatestVersion}})`,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
target = os.Getenv("UPDATE_TARGET")
|
||||
|
||||
// GitHub repo data
|
||||
ghToken = os.Getenv("GITHUB_TOKEN")
|
||||
ghOwner = "kubernetes"
|
||||
ghRepo = "minikube"
|
||||
ghBase = "master" // could be "main" in the future?
|
||||
|
||||
// PR data
|
||||
prBranchPrefix = "update-kubernetes-version_" // will be appended with first 7 characters of the PR commit SHA
|
||||
prTitle = `update_kubernetes_version: {stable:"{{.K8sStableVersion}}", latest:"{{.K8sLatestVersion}}"}`
|
||||
prIssue = 4392
|
||||
prSearchLimit = 100 // limit the number of previous PRs searched for same prTitle to be <= N * ghListOptionsPerPage
|
||||
)
|
||||
|
||||
// Data holds respective stable (release) and latest (pre-release) Kubernetes versions
|
||||
type Data struct {
|
||||
K8sStableVersion string `json:"k8sStableVersion"`
|
||||
K8sLatestVersion string `json:"k8sLatestVersion"`
|
||||
}
|
||||
|
||||
// Patch defines content where all occurrences of each replace map key should be swapped with its
|
||||
// respective value. Replace map keys can use RegExp and values can use Golang Text Template
|
||||
type Patch struct {
|
||||
Content []byte `json:"-"`
|
||||
Replace map[string]string `json:"replace"`
|
||||
}
|
||||
|
||||
// apply patch to content by replacing all occurrences of map's keys with their respective values
|
||||
func (p *Patch) apply(data interface{}) (changed bool, err error) {
|
||||
if p.Content == nil || p.Replace == nil {
|
||||
return false, fmt.Errorf("nothing to patch")
|
||||
}
|
||||
org := string(p.Content)
|
||||
str := org
|
||||
for src, dst := range p.Replace {
|
||||
re := regexp.MustCompile(src)
|
||||
tmpl := template.Must(template.New("").Parse(dst))
|
||||
buf := new(bytes.Buffer)
|
||||
if err := tmpl.Execute(buf, data); err != nil {
|
||||
return false, err
|
||||
}
|
||||
str = re.ReplaceAllString(str, buf.String())
|
||||
}
|
||||
p.Content = []byte(str)
|
||||
|
||||
return str != org, nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
// write log statements to stderr instead of to files
|
||||
if err := flag.Set("logtostderr", "true"); err != nil {
|
||||
fmt.Printf("Error setting 'logtostderr' klog flag: %v", err)
|
||||
}
|
||||
flag.Parse()
|
||||
defer klog.Flush()
|
||||
|
||||
if target == "" {
|
||||
target = "fs"
|
||||
} else if target != "fs" && target != "gh" && target != "all" {
|
||||
klog.Fatalf("Invalid UPDATE_TARGET option: '%s'; Valid options are: unset/absent (defaults to 'fs'), 'fs', 'gh', or 'all'", target)
|
||||
} else if (target == "gh" || target == "all") && ghToken == "" {
|
||||
klog.Fatalf("GITHUB_TOKEN is required if UPDATE_TARGET is 'gh' or 'all'")
|
||||
}
|
||||
|
||||
// set a context with defined timeout
|
||||
ctx, cancel := context.WithTimeout(context.Background(), cxTimeout)
|
||||
defer cancel()
|
||||
|
||||
// get Kubernetes versions from GitHub Releases
|
||||
stable, latest, err := ghReleases(ctx, "kubernetes", "kubernetes", ghToken)
|
||||
if err != nil || stable == "" || latest == "" {
|
||||
klog.Fatalf("Error getting Kubernetes versions: %v", err)
|
||||
}
|
||||
data := Data{K8sStableVersion: stable, K8sLatestVersion: latest}
|
||||
klog.Infof("Kubernetes versions: 'stable' is %s and 'latest' is %s", data.K8sStableVersion, data.K8sLatestVersion)
|
||||
|
||||
klog.Infof("The Plan:\n%s", thePlan(plan, data))
|
||||
|
||||
if target == "fs" || target == "all" {
|
||||
changed, err := fsUpdate(fsRoot, plan, data)
|
||||
if err != nil {
|
||||
klog.Errorf("Error updating local repo: %v", err)
|
||||
} else if !changed {
|
||||
klog.Infof("Local repo update skipped: nothing changed")
|
||||
} else {
|
||||
klog.Infof("Local repo updated")
|
||||
}
|
||||
}
|
||||
|
||||
if target == "gh" || target == "all" {
|
||||
// update prTitle replacing template placeholders with concrete data values
|
||||
tmpl := template.Must(template.New("prTitle").Parse(prTitle))
|
||||
buf := new(bytes.Buffer)
|
||||
if err := tmpl.Execute(buf, data); err != nil {
|
||||
klog.Fatalf("Error parsing PR Title: %v", err)
|
||||
}
|
||||
prTitle = buf.String()
|
||||
|
||||
// check if PR already exists
|
||||
prURL, err := ghFindPR(ctx, prTitle, ghOwner, ghRepo, ghBase, ghToken)
|
||||
if err != nil {
|
||||
klog.Errorf("Error checking if PR already exists: %v", err)
|
||||
} else if prURL != "" {
|
||||
klog.Infof("PR create skipped: already exists (%s)", prURL)
|
||||
} else {
|
||||
// create PR
|
||||
pr, err := ghCreatePR(ctx, ghOwner, ghRepo, ghBase, prBranchPrefix, prTitle, prIssue, ghToken, plan, data)
|
||||
if err != nil {
|
||||
klog.Fatalf("Error creating PR: %v", err)
|
||||
} else if pr == nil {
|
||||
klog.Infof("PR create skipped: nothing changed")
|
||||
} else {
|
||||
klog.Infof("PR created: %s", *pr.HTMLURL)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fsUpdate updates local filesystem repo files according to the given plan and data,
|
||||
// returns if the update actually changed anything, and any error occurred
|
||||
func fsUpdate(fsRoot string, plan map[string]Patch, data Data) (changed bool, err error) {
|
||||
for path, p := range plan {
|
||||
path = filepath.Join(fsRoot, path)
|
||||
blob, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
mode := info.Mode()
|
||||
|
||||
p.Content = blob
|
||||
chg, err := p.apply(data)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if chg {
|
||||
changed = true
|
||||
}
|
||||
if err := ioutil.WriteFile(path, p.Content, mode); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
return changed, nil
|
||||
}
|
||||
|
||||
// ghCreatePR returns PR created in the GitHub owner/repo, applying the changes to the base head
|
||||
// commit fork, as defined by the plan and data, and also returns any error occurred
|
||||
// PR branch will be named by the branch, sufixed by '_' and first 7 characters of fork commit SHA
|
||||
// PR itself will be named by the title and will reference the issue
|
||||
func ghCreatePR(ctx context.Context, owner, repo, base, branch, title string, issue int, token string, plan map[string]Patch, data Data) (*github.PullRequest, error) {
|
||||
ghc := ghClient(ctx, token)
|
||||
|
||||
// get base branch
|
||||
baseBranch, _, err := ghc.Repositories.GetBranch(ctx, owner, repo, base)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting base branch: %w", err)
|
||||
}
|
||||
|
||||
// get base commit
|
||||
baseCommit, _, err := ghc.Repositories.GetCommit(ctx, owner, repo, *baseBranch.Commit.SHA)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting base commit: %w", err)
|
||||
}
|
||||
|
||||
// get base tree
|
||||
baseTree, _, err := ghc.Git.GetTree(ctx, owner, repo, baseCommit.GetSHA(), true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting base tree: %w", err)
|
||||
}
|
||||
|
||||
// update files
|
||||
changes, err := ghUpdate(ctx, owner, repo, baseTree, token, plan, data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error updating files: %w", err)
|
||||
}
|
||||
if changes == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// create fork
|
||||
fork, resp, err := ghc.Repositories.CreateFork(ctx, owner, repo, nil)
|
||||
// https://pkg.go.dev/github.com/google/go-github/v32@v32.1.0/github#RepositoriesService.CreateFork
|
||||
// This method might return an *AcceptedError and a status code of 202. This is because this is
|
||||
// the status that GitHub returns to signify that it is now computing creating the fork in a
|
||||
// background task. In this event, the Repository value will be returned, which includes the
|
||||
// details about the pending fork. A follow up request, after a delay of a second or so, should
|
||||
// result in a successful request.
|
||||
if resp.StatusCode == 202 { // *AcceptedError
|
||||
time.Sleep(time.Second * 5)
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("error creating fork: %w", err)
|
||||
}
|
||||
|
||||
// create fork tree from base and changed files
|
||||
forkTree, _, err := ghc.Git.CreateTree(ctx, *fork.Owner.Login, *fork.Name, *baseTree.SHA, changes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating fork tree: %w", err)
|
||||
}
|
||||
|
||||
// create fork commit
|
||||
forkCommit, _, err := ghc.Git.CreateCommit(ctx, *fork.Owner.Login, *fork.Name, &github.Commit{
|
||||
Message: github.String(title),
|
||||
Tree: &github.Tree{SHA: forkTree.SHA},
|
||||
Parents: []*github.Commit{{SHA: baseCommit.SHA}},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating fork commit: %w", err)
|
||||
}
|
||||
klog.Infof("PR commit '%s' created: %s", forkCommit.GetSHA(), forkCommit.GetHTMLURL())
|
||||
|
||||
// create PR branch
|
||||
prBranch := branch + forkCommit.GetSHA()[:7]
|
||||
prRef, _, err := ghc.Git.CreateRef(ctx, *fork.Owner.Login, *fork.Name, &github.Reference{
|
||||
Ref: github.String("refs/heads/" + prBranch),
|
||||
Object: &github.GitObject{
|
||||
Type: github.String("commit"),
|
||||
SHA: forkCommit.SHA,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating PR branch: %w", err)
|
||||
}
|
||||
klog.Infof("PR branch '%s' created: %s", prBranch, prRef.GetURL())
|
||||
|
||||
// create PR
|
||||
modifiable := true
|
||||
pr, _, err := ghc.PullRequests.Create(ctx, owner, repo, &github.NewPullRequest{
|
||||
Title: github.String(title),
|
||||
Head: github.String(*fork.Owner.Login + ":" + prBranch),
|
||||
Base: github.String(base),
|
||||
Body: github.String(fmt.Sprintf("fixes #%d\n\nAutomatically created PR to update repo according to the Plan:\n\n```\n%s\n```", issue, thePlan(plan, data))),
|
||||
MaintainerCanModify: &modifiable,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating pull request: %w", err)
|
||||
}
|
||||
return pr, nil
|
||||
}
|
||||
|
||||
// ghUpdate updates remote GitHub owner/repo tree according to the given token, plan and data,
|
||||
// returns resulting changes, and any error occurred
|
||||
func ghUpdate(ctx context.Context, owner, repo string, tree *github.Tree, token string, plan map[string]Patch, data Data) (changes []*github.TreeEntry, err error) {
|
||||
ghc := ghClient(ctx, token)
|
||||
|
||||
// load each plan's path content and update it creating new GitHub TreeEntries
|
||||
cnt := len(plan) // expected number of files to change
|
||||
for _, org := range tree.Entries {
|
||||
if *org.Type == "blob" {
|
||||
if patch, match := plan[*org.Path]; match {
|
||||
blob, _, err := ghc.Git.GetBlobRaw(ctx, owner, repo, *org.SHA)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting file: %w", err)
|
||||
}
|
||||
patch.Content = blob
|
||||
changed, err := patch.apply(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error patching file: %w", err)
|
||||
}
|
||||
if changed {
|
||||
// add github.TreeEntry that will replace original path content with patched one
|
||||
changes = append(changes, &github.TreeEntry{
|
||||
Path: org.Path,
|
||||
Mode: org.Mode,
|
||||
Type: org.Type,
|
||||
Content: github.String(string(patch.Content)),
|
||||
})
|
||||
}
|
||||
if cnt--; cnt == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if cnt != 0 {
|
||||
return nil, fmt.Errorf("error finding all the files (%d missing) - check the Plan: %w", cnt, err)
|
||||
}
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
// ghFindPR returns URL of the PR if found in the given GitHub ower/repo base and any error occurred
|
||||
func ghFindPR(ctx context.Context, title, owner, repo, base, token string) (url string, err error) {
|
||||
ghc := ghClient(ctx, token)
|
||||
|
||||
// walk through the paginated list of all pull requests, from latest to older releases
|
||||
opts := &github.PullRequestListOptions{State: "all", Base: base, ListOptions: github.ListOptions{PerPage: ghListOptionsPerPage}}
|
||||
for (opts.Page+1)*ghListOptionsPerPage <= prSearchLimit {
|
||||
prs, resp, err := ghc.PullRequests.List(ctx, owner, repo, opts)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
for _, pr := range prs {
|
||||
if pr.GetTitle() == title {
|
||||
return pr.GetHTMLURL(), nil
|
||||
}
|
||||
}
|
||||
if resp.NextPage == 0 {
|
||||
break
|
||||
}
|
||||
opts.Page = resp.NextPage
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// ghReleases returns current stable release and latest rc or beta pre-release
|
||||
// from GitHub owner/repo repository, and any error;
|
||||
// if latest pre-release version is lower than current stable release, then it
|
||||
// will return current stable release for both
|
||||
func ghReleases(ctx context.Context, owner, repo, token string) (stable, latest string, err error) {
|
||||
ghc := ghClient(ctx, token)
|
||||
|
||||
// walk through the paginated list of all owner/repo releases, from newest to oldest
|
||||
opts := &github.ListOptions{PerPage: ghListOptionsPerPage}
|
||||
for {
|
||||
rls, resp, err := ghc.Repositories.ListReleases(ctx, owner, repo, opts)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
for _, rl := range rls {
|
||||
ver := rl.GetName()
|
||||
if ver == "" {
|
||||
continue
|
||||
}
|
||||
// check if ver version is a release (ie, 'v1.19.2') or a
|
||||
// pre-release (ie, 'v1.19.3-rc.0' or 'v1.19.0-beta.2') channel ch
|
||||
// note: github.RepositoryRelease GetPrerelease() bool would be useful for all pre-rels
|
||||
ch := strings.Split(ver, "-")
|
||||
if len(ch) == 1 && stable == "" {
|
||||
stable = ver
|
||||
} else if len(ch) > 1 && latest == "" {
|
||||
if strings.HasPrefix(ch[1], "rc") || strings.HasPrefix(ch[1], "beta") {
|
||||
latest = ver
|
||||
}
|
||||
}
|
||||
if stable != "" && latest != "" {
|
||||
// make sure that v.Latest >= stable
|
||||
if latest < stable {
|
||||
latest = stable
|
||||
}
|
||||
return stable, latest, nil
|
||||
}
|
||||
}
|
||||
if resp.NextPage == 0 {
|
||||
break
|
||||
}
|
||||
opts.Page = resp.NextPage
|
||||
}
|
||||
return stable, latest, nil
|
||||
}
|
||||
|
||||
// ghClient returns GitHub Client with a given context and optional token for authenticated requests
|
||||
func ghClient(ctx context.Context, token string) *github.Client {
|
||||
if token == "" {
|
||||
return github.NewClient(nil)
|
||||
}
|
||||
ts := oauth2.StaticTokenSource(
|
||||
&oauth2.Token{AccessToken: token},
|
||||
)
|
||||
tc := oauth2.NewClient(ctx, ts)
|
||||
return github.NewClient(tc)
|
||||
}
|
||||
|
||||
// thePlan parses and returns updated plan replacing template placeholders with concrete data values
|
||||
func thePlan(plan map[string]Patch, data Data) (prettyprint string) {
|
||||
for _, p := range plan {
|
||||
for src, dst := range p.Replace {
|
||||
tmpl := template.Must(template.New("").Parse(dst))
|
||||
buf := new(bytes.Buffer)
|
||||
if err := tmpl.Execute(buf, data); err != nil {
|
||||
klog.Fatalf("Error parsing the Plan: %v", err)
|
||||
return fmt.Sprintf("%+v", plan)
|
||||
}
|
||||
p.Replace[src] = buf.String()
|
||||
}
|
||||
}
|
||||
str, err := json.MarshalIndent(plan, "", " ")
|
||||
if err != nil {
|
||||
klog.Fatalf("Error parsing the Plan: %v", err)
|
||||
return fmt.Sprintf("%+v", plan)
|
||||
}
|
||||
return string(str)
|
||||
}
|
||||
|
|
@ -22,8 +22,8 @@ import (
|
|||
"path/filepath"
|
||||
)
|
||||
|
||||
// fsUpdate updates local filesystem repo files according to the given schema and data,
|
||||
// returns if the update actually changed anything, and any error occurred
|
||||
// fsUpdate updates local filesystem repo files according to the given schema and data.
|
||||
// Returns if the update actually changed anything, and any error occurred.
|
||||
func fsUpdate(fsRoot string, schema map[string]Item, data interface{}) (changed bool, err error) {
|
||||
for path, item := range schema {
|
||||
path = filepath.Join(fsRoot, path)
|
||||
|
|
|
|||
|
|
@ -31,11 +31,11 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
// ghListPerPage uses max value (100) for PerPage to avoid hitting the rate limits
|
||||
// ghListPerPage uses max value (100) for PerPage to avoid hitting the rate limits.
|
||||
// (ref: https://godoc.org/github.com/google/go-github/github#hdr-Rate_Limiting)
|
||||
ghListPerPage = 100
|
||||
|
||||
// ghSearchLimit limits the number of searched items to be <= N * ListPerPage
|
||||
// ghSearchLimit limits the number of searched items to be <= N * ghListPerPage.
|
||||
ghSearchLimit = 100
|
||||
)
|
||||
|
||||
|
|
@ -44,38 +44,38 @@ var (
|
|||
ghToken = os.Getenv("GITHUB_TOKEN")
|
||||
ghOwner = "kubernetes"
|
||||
ghRepo = "minikube"
|
||||
ghBase = "master" // could be "main" in the future?
|
||||
ghBase = "master" // could be "main" in the near future?
|
||||
)
|
||||
|
||||
// ghCreatePR returns PR created in the GitHub owner/repo, applying the changes to the base head
|
||||
// commit fork, as defined by the schema and data, and also returns any error occurred
|
||||
// PR branch will be named by the branch, sufixed by '_' and first 7 characters of fork commit SHA
|
||||
// PR itself will be named by the title and will reference the issue
|
||||
// ghCreatePR returns PR created in the GitHub owner/repo, applying the changes to the base head commit fork, as defined by the schema and data.
|
||||
// Returns any error occurred.
|
||||
// PR branch will be named by the branch, sufixed by '_' and first 7 characters of the fork commit SHA.
|
||||
// PR itself will be named by the title and will reference the issue.
|
||||
func ghCreatePR(ctx context.Context, owner, repo, base, branch, title string, issue int, token string, schema map[string]Item, data interface{}) (*github.PullRequest, error) {
|
||||
ghc := ghClient(ctx, token)
|
||||
|
||||
// get base branch
|
||||
baseBranch, _, err := ghc.Repositories.GetBranch(ctx, owner, repo, base)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting base branch: %w", err)
|
||||
return nil, fmt.Errorf("unable to get base branch: %w", err)
|
||||
}
|
||||
|
||||
// get base commit
|
||||
baseCommit, _, err := ghc.Repositories.GetCommit(ctx, owner, repo, *baseBranch.Commit.SHA)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting base commit: %w", err)
|
||||
return nil, fmt.Errorf("unable to get base commit: %w", err)
|
||||
}
|
||||
|
||||
// get base tree
|
||||
baseTree, _, err := ghc.Git.GetTree(ctx, owner, repo, baseCommit.GetSHA(), true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting base tree: %w", err)
|
||||
return nil, fmt.Errorf("unable to get base tree: %w", err)
|
||||
}
|
||||
|
||||
// update files
|
||||
changes, err := ghUpdate(ctx, owner, repo, baseTree, token, schema, data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error updating files: %w", err)
|
||||
return nil, fmt.Errorf("unable to update files: %w", err)
|
||||
}
|
||||
if changes == nil {
|
||||
return nil, nil
|
||||
|
|
@ -83,22 +83,21 @@ func ghCreatePR(ctx context.Context, owner, repo, base, branch, title string, is
|
|||
|
||||
// create fork
|
||||
fork, resp, err := ghc.Repositories.CreateFork(ctx, owner, repo, nil)
|
||||
// https://pkg.go.dev/github.com/google/go-github/v32@v32.1.0/github#RepositoriesService.CreateFork
|
||||
// This method might return an *AcceptedError and a status code of 202. This is because this is
|
||||
// the status that GitHub returns to signify that it is now computing creating the fork in a
|
||||
// background task. In this event, the Repository value will be returned, which includes the
|
||||
// details about the pending fork. A follow up request, after a delay of a second or so, should
|
||||
// result in a successful request.
|
||||
// "This method might return an *AcceptedError and a status code of 202.
|
||||
// This is because this is the status that GitHub returns to signify that it is now computing creating the fork in a background task.
|
||||
// In this event, the Repository value will be returned, which includes the details about the pending fork.
|
||||
// A follow up request, after a delay of a second or so, should result in a successful request."
|
||||
// (ref: https://pkg.go.dev/github.com/google/go-github/v32@v32.1.0/github#RepositoriesService.CreateFork)
|
||||
if resp.StatusCode == 202 { // *AcceptedError
|
||||
time.Sleep(time.Second * 5)
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("error creating fork: %w", err)
|
||||
return nil, fmt.Errorf("unable to create fork: %w", err)
|
||||
}
|
||||
|
||||
// create fork tree from base and changed files
|
||||
forkTree, _, err := ghc.Git.CreateTree(ctx, *fork.Owner.Login, *fork.Name, *baseTree.SHA, changes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating fork tree: %w", err)
|
||||
return nil, fmt.Errorf("unable to create fork tree: %w", err)
|
||||
}
|
||||
|
||||
// create fork commit
|
||||
|
|
@ -108,9 +107,9 @@ func ghCreatePR(ctx context.Context, owner, repo, base, branch, title string, is
|
|||
Parents: []*github.Commit{{SHA: baseCommit.SHA}},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating fork commit: %w", err)
|
||||
return nil, fmt.Errorf("unable to create fork commit: %w", err)
|
||||
}
|
||||
klog.Infof("PR commit '%s' created: %s", forkCommit.GetSHA(), forkCommit.GetHTMLURL())
|
||||
klog.Infof("PR commit '%s' successfully created: %s", forkCommit.GetSHA(), forkCommit.GetHTMLURL())
|
||||
|
||||
// create PR branch
|
||||
prBranch := branch + forkCommit.GetSHA()[:7]
|
||||
|
|
@ -122,74 +121,34 @@ func ghCreatePR(ctx context.Context, owner, repo, base, branch, title string, is
|
|||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating PR branch: %w", err)
|
||||
return nil, fmt.Errorf("unable to create PR branch: %w", err)
|
||||
}
|
||||
klog.Infof("PR branch '%s' created: %s", prBranch, prRef.GetURL())
|
||||
klog.Infof("PR branch '%s' successfully created: %s", prBranch, prRef.GetURL())
|
||||
|
||||
// create PR
|
||||
plan, err := GetPlan(schema, data)
|
||||
if err != nil {
|
||||
klog.Fatalf("Error parsing schema: %v\n%s", err, plan)
|
||||
klog.Fatalf("Unable to parse schema: %v\n%s", err, plan)
|
||||
}
|
||||
modifiable := true
|
||||
pr, _, err := ghc.PullRequests.Create(ctx, owner, repo, &github.NewPullRequest{
|
||||
Title: github.String(title),
|
||||
Head: github.String(*fork.Owner.Login + ":" + prBranch),
|
||||
Base: github.String(base),
|
||||
Body: github.String(fmt.Sprintf("fixes #%d\n\nAutomatically created PR to update repo according to the Plan:\n\n```\n%s\n```", issue, plan)),
|
||||
Body: github.String(fmt.Sprintf("fixes: #%d\n\nAutomatically created PR to update repo according to the Plan:\n\n```\n%s\n```", issue, plan)),
|
||||
MaintainerCanModify: &modifiable,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating pull request: %w", err)
|
||||
return nil, fmt.Errorf("unable to create PR: %w", err)
|
||||
}
|
||||
return pr, nil
|
||||
}
|
||||
|
||||
// ghUpdate updates remote GitHub owner/repo tree according to the given token, schema and data,
|
||||
// returns resulting changes, and any error occurred
|
||||
func ghUpdate(ctx context.Context, owner, repo string, tree *github.Tree, token string, schema map[string]Item, data interface{}) (changes []*github.TreeEntry, err error) {
|
||||
ghc := ghClient(ctx, token)
|
||||
|
||||
// load each schema item content and update it creating new GitHub TreeEntries
|
||||
cnt := len(schema) // expected number of files to change
|
||||
for _, org := range tree.Entries {
|
||||
if *org.Type == "blob" {
|
||||
if item, match := schema[*org.Path]; match {
|
||||
blob, _, err := ghc.Git.GetBlobRaw(ctx, owner, repo, *org.SHA)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting file: %w", err)
|
||||
}
|
||||
item.Content = blob
|
||||
changed, err := item.apply(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error updating file: %w", err)
|
||||
}
|
||||
if changed {
|
||||
// add github.TreeEntry that will replace original path content with updated one
|
||||
changes = append(changes, &github.TreeEntry{
|
||||
Path: org.Path,
|
||||
Mode: org.Mode,
|
||||
Type: org.Type,
|
||||
Content: github.String(string(item.Content)),
|
||||
})
|
||||
}
|
||||
if cnt--; cnt == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if cnt != 0 {
|
||||
return nil, fmt.Errorf("error finding all the files (%d missing) - check the Plan: %w", cnt, err)
|
||||
}
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
// ghFindPR returns URL of the PR if found in the given GitHub ower/repo base and any error occurred
|
||||
// ghFindPR returns URL of the PR if found in the given GitHub ower/repo base and any error occurred.
|
||||
func ghFindPR(ctx context.Context, title, owner, repo, base, token string) (url string, err error) {
|
||||
ghc := ghClient(ctx, token)
|
||||
|
||||
// walk through the paginated list of all pull requests, from latest to older releases
|
||||
// walk through the paginated list of up to ghSearchLimit newest pull requests
|
||||
opts := &github.PullRequestListOptions{State: "all", Base: base, ListOptions: github.ListOptions{PerPage: ghListPerPage}}
|
||||
for (opts.Page+1)*ghListPerPage <= ghSearchLimit {
|
||||
prs, resp, err := ghc.PullRequests.List(ctx, owner, repo, opts)
|
||||
|
|
@ -209,28 +168,54 @@ func ghFindPR(ctx context.Context, title, owner, repo, base, token string) (url
|
|||
return "", nil
|
||||
}
|
||||
|
||||
// ghClient returns GitHub Client with a given context and optional token for authenticated requests
|
||||
func ghClient(ctx context.Context, token string) *github.Client {
|
||||
if token == "" {
|
||||
return github.NewClient(nil)
|
||||
// ghUpdate updates remote GitHub owner/repo tree according to the given token, schema and data.
|
||||
// Returns resulting changes, and any error occurred.
|
||||
func ghUpdate(ctx context.Context, owner, repo string, tree *github.Tree, token string, schema map[string]Item, data interface{}) (changes []*github.TreeEntry, err error) {
|
||||
ghc := ghClient(ctx, token)
|
||||
|
||||
// load each schema item content and update it creating new GitHub TreeEntries
|
||||
cnt := len(schema) // expected number of files to change
|
||||
for _, org := range tree.Entries {
|
||||
if *org.Type == "blob" {
|
||||
if item, match := schema[*org.Path]; match {
|
||||
blob, _, err := ghc.Git.GetBlobRaw(ctx, owner, repo, *org.SHA)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get file: %w", err)
|
||||
}
|
||||
item.Content = blob
|
||||
changed, err := item.apply(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to update file: %w", err)
|
||||
}
|
||||
if changed {
|
||||
// add github.TreeEntry that will replace original path content with the updated one
|
||||
changes = append(changes, &github.TreeEntry{
|
||||
Path: org.Path,
|
||||
Mode: org.Mode,
|
||||
Type: org.Type,
|
||||
Content: github.String(string(item.Content)),
|
||||
})
|
||||
}
|
||||
if cnt--; cnt == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
ts := oauth2.StaticTokenSource(
|
||||
&oauth2.Token{AccessToken: token},
|
||||
)
|
||||
tc := oauth2.NewClient(ctx, ts)
|
||||
return github.NewClient(tc)
|
||||
if cnt != 0 {
|
||||
return nil, fmt.Errorf("unable to find all the files (%d missing) - check the Plan: %w", cnt, err)
|
||||
}
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
// GHVersions returns greatest current stable release and greatest latest rc or beta pre-release
|
||||
// from GitHub owner/repo repository, and any error;
|
||||
// if latest pre-release version is lower than current stable release, then it
|
||||
// will return current stable release for both
|
||||
func GHVersions(ctx context.Context, owner, repo string) (stable, latest string, err error) {
|
||||
// GHReleases returns greatest current stable release and greatest latest rc or beta pre-release from GitHub owner/repo repository, and any error occurred.
|
||||
// If latest pre-release version is lower than the current stable release, then it will return current stable release for both.
|
||||
func GHReleases(ctx context.Context, owner, repo string) (stable, latest string, err error) {
|
||||
ghc := ghClient(ctx, ghToken)
|
||||
|
||||
// walk through the paginated list of all owner/repo releases, from newest to oldest
|
||||
// walk through the paginated list of up to ghSearchLimit newest releases
|
||||
opts := &github.ListOptions{PerPage: ghListPerPage}
|
||||
for {
|
||||
for (opts.Page+1)*ghListPerPage <= ghSearchLimit {
|
||||
rls, resp, err := ghc.Repositories.ListReleases(ctx, owner, repo, opts)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
|
|
@ -259,3 +244,15 @@ func GHVersions(ctx context.Context, owner, repo string) (stable, latest string,
|
|||
}
|
||||
return stable, latest, nil
|
||||
}
|
||||
|
||||
// ghClient returns GitHub Client with a given context and optional token for authenticated requests.
|
||||
func ghClient(ctx context.Context, token string) *github.Client {
|
||||
if token == "" {
|
||||
return github.NewClient(nil)
|
||||
}
|
||||
ts := oauth2.StaticTokenSource(
|
||||
&oauth2.Token{AccessToken: token},
|
||||
)
|
||||
tc := oauth2.NewClient(ctx, ts)
|
||||
return github.NewClient(tc)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -34,6 +34,7 @@ import (
|
|||
"time"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"k8s.io/minikube/hack/update"
|
||||
)
|
||||
|
||||
|
|
@ -87,13 +88,13 @@ var (
|
|||
},
|
||||
}
|
||||
|
||||
// pull request data
|
||||
// PR data
|
||||
prBranchPrefix = "update-golang-version_" // will be appended with first 7 characters of the PR commit SHA
|
||||
prTitle = `update_golang_version: {stable:"{{.StableVersion}}"}`
|
||||
prTitle = `update_golang_version: {stable: "{{.StableVersion}}"}`
|
||||
prIssue = 9264
|
||||
)
|
||||
|
||||
// Data holds stable Golang version
|
||||
// Data holds stable Golang version - in full and in <major>.<minor> format
|
||||
type Data struct {
|
||||
StableVersion string `json:"stableVersion"`
|
||||
StableVersionMM string `json:"stableVersionMM"` // go.mod wants go version in <major>.<minor> format
|
||||
|
|
@ -107,7 +108,7 @@ func main() {
|
|||
// get Golang stable version
|
||||
stable, stableMM, err := goVersions()
|
||||
if err != nil || stable == "" || stableMM == "" {
|
||||
klog.Fatalf("Error getting Golang stable version: %v", err)
|
||||
klog.Fatalf("Unable to get Golang stable version: %v", err)
|
||||
}
|
||||
data := Data{StableVersion: stable, StableVersionMM: stableMM}
|
||||
klog.Infof("Golang stable version: %s", data.StableVersion)
|
||||
|
|
@ -115,7 +116,7 @@ func main() {
|
|||
update.Apply(ctx, schema, data, prBranchPrefix, prTitle, prIssue)
|
||||
}
|
||||
|
||||
// goVersion returns Golang stable version
|
||||
// goVersion returns Golang stable version.
|
||||
func goVersions() (stable, stableMM string, err error) {
|
||||
resp, err := http.Get("https://golang.org/VERSION?m=text")
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -55,6 +55,7 @@ import (
|
|||
"time"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"k8s.io/minikube/hack/update"
|
||||
)
|
||||
|
||||
|
|
@ -75,13 +76,13 @@ var (
|
|||
},
|
||||
}
|
||||
|
||||
// pull request data
|
||||
// PR data
|
||||
prBranchPrefix = "update-kicbase-version_" // will be appended with first 7 characters of the PR commit SHA
|
||||
prTitle = `update-kicbase-version: {"{{.StableVersion}}"}`
|
||||
prTitle = `update-kicbase-version: {stable: "{{.StableVersion}}"}`
|
||||
prIssue = 9420
|
||||
)
|
||||
|
||||
// Data holds current and stable KIC Base image versions
|
||||
// Data holds current and stable KIC base image versions
|
||||
type Data struct {
|
||||
CurrentVersion string `json:"CurrentVersion"`
|
||||
StableVersion string `json:"StableVersion"`
|
||||
|
|
@ -92,33 +93,33 @@ func main() {
|
|||
ctx, cancel := context.WithTimeout(context.Background(), cxTimeout)
|
||||
defer cancel()
|
||||
|
||||
// determine current and stable kic base image versions
|
||||
// determine current and stable KIC base image versions
|
||||
current, stable, err := KICVersions()
|
||||
if err != nil {
|
||||
klog.Fatalf("failed getting kic base image versions: %v", err)
|
||||
klog.Fatalf("Unable to get KIC base image versions: %v", err)
|
||||
}
|
||||
if len(current) == 0 || len(stable) == 0 {
|
||||
klog.Fatalf("cannot determine kic base image versions")
|
||||
klog.Fatalf("Unable to determine KIC base image versions")
|
||||
}
|
||||
data := Data{CurrentVersion: current, StableVersion: stable}
|
||||
klog.Infof("kic base image versions: 'current' is %s and 'stable' would be %s", data.CurrentVersion, data.StableVersion)
|
||||
klog.Infof("KIC base image versions: 'current' is %s and 'stable' would be %s", data.CurrentVersion, data.StableVersion)
|
||||
|
||||
// prepare local kic base image
|
||||
// prepare local KIC base image
|
||||
image, err := prepareImage(ctx, data)
|
||||
if err != nil {
|
||||
klog.Fatalf("failed preparing local kic base reference image: %v", err)
|
||||
klog.Fatalf("Unable to prepare local KIC base reference image: %v", err)
|
||||
}
|
||||
klog.Infof("local kic base reference image: %s", image)
|
||||
klog.Infof("Local KIC base reference image: %s", image)
|
||||
|
||||
// update registries
|
||||
if updated := update.CRUpdateAll(ctx, image, data.StableVersion); !updated {
|
||||
klog.Fatalf("failed updating all registries")
|
||||
klog.Fatalf("Unable to update any registry")
|
||||
}
|
||||
|
||||
update.Apply(ctx, schema, data, prBranchPrefix, prTitle, prIssue)
|
||||
}
|
||||
|
||||
// KICVersions returns current and stable kic base image versions and any error
|
||||
// KICVersions returns current and stable KIC base image versions and any error occurred.
|
||||
func KICVersions() (current, stable string, err error) {
|
||||
blob, err := ioutil.ReadFile(filepath.Join(update.FSRoot, kicFile))
|
||||
if err != nil {
|
||||
|
|
@ -134,8 +135,8 @@ func KICVersions() (current, stable string, err error) {
|
|||
return current, stable, nil
|
||||
}
|
||||
|
||||
// prepareImage checks if current image exists locally, tries to pull it if not,
|
||||
// tags it with release version, returns reference image url and any error
|
||||
// prepareImage checks if current image exists locally, tries to pull it if not, tags it with release version.
|
||||
// Returns reference image url and any error occurred.
|
||||
func prepareImage(ctx context.Context, data Data) (image string, err error) {
|
||||
image, err = update.PullImage(ctx, data.CurrentVersion, data.StableVersion)
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -31,6 +31,7 @@ import (
|
|||
"time"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"k8s.io/minikube/hack/update"
|
||||
)
|
||||
|
||||
|
|
@ -55,13 +56,13 @@ var (
|
|||
},
|
||||
}
|
||||
|
||||
// pull request data
|
||||
// PR data
|
||||
prBranchPrefix = "update-kubernetes-version_" // will be appended with first 7 characters of the PR commit SHA
|
||||
prTitle = `update_kubernetes_version: {stable:"{{.StableVersion}}", latest:"{{.LatestVersion}}"}`
|
||||
prTitle = `update_kubernetes_version: {stable: "{{.StableVersion}}", latest: "{{.LatestVersion}}"}`
|
||||
prIssue = 4392
|
||||
)
|
||||
|
||||
// Data holds stable and latest Kubernetes versions
|
||||
// Data holds greatest current stable release and greatest latest rc or beta pre-release Kubernetes versions
|
||||
type Data struct {
|
||||
StableVersion string `json:"StableVersion"`
|
||||
LatestVersion string `json:"LatestVersion"`
|
||||
|
|
@ -73,9 +74,9 @@ func main() {
|
|||
defer cancel()
|
||||
|
||||
// get Kubernetes versions from GitHub Releases
|
||||
stable, latest, err := update.GHVersions(ctx, "kubernetes", "kubernetes")
|
||||
stable, latest, err := update.GHReleases(ctx, "kubernetes", "kubernetes")
|
||||
if err != nil || stable == "" || latest == "" {
|
||||
klog.Fatalf("Error getting Kubernetes versions: %v", err)
|
||||
klog.Fatalf("Unable to get Kubernetes versions: %v", err)
|
||||
}
|
||||
data := Data{StableVersion: stable, LatestVersion: latest}
|
||||
klog.Infof("Kubernetes versions: 'stable' is %s and 'latest' is %s", data.StableVersion, data.LatestVersion)
|
||||
|
|
|
|||
|
|
@ -28,7 +28,7 @@ import (
|
|||
)
|
||||
|
||||
var (
|
||||
// keep list of registries in sync with those in "pkg/drivers/kic/types.go"
|
||||
// list of registries - keep it in sync with those in "pkg/drivers/kic/types.go"
|
||||
registries = []registry{
|
||||
{
|
||||
name: "Google Cloud Container Registry",
|
||||
|
|
@ -51,7 +51,7 @@ var (
|
|||
}
|
||||
)
|
||||
|
||||
// container registry name, image path, credentials, and updated flag
|
||||
// registry contains a container registry name, image path, and credentials.
|
||||
type registry struct {
|
||||
name string
|
||||
image string
|
||||
|
|
@ -59,44 +59,52 @@ type registry struct {
|
|||
password string
|
||||
}
|
||||
|
||||
// crUpdate tags image with version, pushes it to container registry, and returns any error
|
||||
func crUpdate(ctx context.Context, reg registry, image, version string) error {
|
||||
login := exec.CommandContext(ctx, "docker", "login", "--username", reg.username, "--password-stdin", reg.image)
|
||||
if err := RunWithRetryNotify(ctx, login, strings.NewReader(reg.password), 1*time.Minute, 10); err != nil {
|
||||
return fmt.Errorf("failed logging in to %s: %w", reg.name, err)
|
||||
}
|
||||
klog.Infof("successfully logged in to %s", reg.name)
|
||||
|
||||
tag := exec.CommandContext(ctx, "docker", "tag", image+":"+version, reg.image+":"+version)
|
||||
if err := RunWithRetryNotify(ctx, tag, nil, 1*time.Minute, 10); err != nil {
|
||||
return fmt.Errorf("failed tagging %s for %s: %w", reg.image+":"+version, reg.name, err)
|
||||
}
|
||||
klog.Infof("successfully tagged %s for %s", reg.image+":"+version, reg.name)
|
||||
|
||||
push := exec.CommandContext(ctx, "docker", "push", reg.image+":"+version)
|
||||
if err := RunWithRetryNotify(ctx, push, nil, 2*time.Minute, 10); err != nil {
|
||||
return fmt.Errorf("failed pushing %s to %s: %w", reg.image+":"+version, reg.name, err)
|
||||
}
|
||||
klog.Infof("successfully pushed %s to %s", reg.image+":"+version, reg.name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CRUpdateAll calls crUpdate for each available registry, and returns if at least one got updated
|
||||
// CRUpdateAll updates all registries, and returns if at least one got updated.
|
||||
func CRUpdateAll(ctx context.Context, image, version string) (updated bool) {
|
||||
for _, reg := range registries {
|
||||
if err := crUpdate(ctx, reg, image, version); err != nil {
|
||||
klog.Errorf("failed updating %s", reg.name)
|
||||
klog.Errorf("Unable to update %s", reg.name)
|
||||
continue
|
||||
}
|
||||
klog.Infof("successfully updated %s", reg.name)
|
||||
klog.Infof("Successfully updated %s", reg.name)
|
||||
updated = true
|
||||
}
|
||||
return updated
|
||||
}
|
||||
|
||||
// PullImage checks if current image exists locally, tries to pull it if not, and
|
||||
// returns reference image url and any error
|
||||
// crUpdate tags image with version, pushes it to container registry, and returns any error occurred.
|
||||
func crUpdate(ctx context.Context, reg registry, image, version string) error {
|
||||
login := exec.CommandContext(ctx, "docker", "login", "--username", reg.username, "--password-stdin", reg.image)
|
||||
if err := RunWithRetryNotify(ctx, login, strings.NewReader(reg.password), 1*time.Minute, 10); err != nil {
|
||||
return fmt.Errorf("unable to login to %s: %w", reg.name, err)
|
||||
}
|
||||
klog.Infof("Successfully logged in to %s", reg.name)
|
||||
|
||||
tag := exec.CommandContext(ctx, "docker", "tag", image+":"+version, reg.image+":"+version)
|
||||
if err := RunWithRetryNotify(ctx, tag, nil, 1*time.Minute, 10); err != nil {
|
||||
return fmt.Errorf("unable to tag %s for %s: %w", reg.image+":"+version, reg.name, err)
|
||||
}
|
||||
klog.Infof("Successfully tagged %s for %s", reg.image+":"+version, reg.name)
|
||||
|
||||
push := exec.CommandContext(ctx, "docker", "push", reg.image+":"+version)
|
||||
if err := RunWithRetryNotify(ctx, push, nil, 2*time.Minute, 10); err != nil {
|
||||
return fmt.Errorf("unable to push %s to %s: %w", reg.image+":"+version, reg.name, err)
|
||||
}
|
||||
klog.Infof("Successfully pushed %s to %s", reg.image+":"+version, reg.name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TagImage tags local image:current with stable version, and returns any error occurred.
|
||||
func TagImage(ctx context.Context, image, current, stable string) error {
|
||||
tag := exec.CommandContext(ctx, "docker", "tag", image+":"+current, image+":"+stable)
|
||||
if err := RunWithRetryNotify(ctx, tag, nil, 1*time.Second, 10); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PullImage checks if current image exists locally, tries to pull it if not, and returns reference image url and any error occurred.
|
||||
func PullImage(ctx context.Context, current, release string) (image string, err error) {
|
||||
// check if image exists locally
|
||||
for _, reg := range registries {
|
||||
|
|
@ -119,16 +127,7 @@ func PullImage(ctx context.Context, current, release string) (image string, err
|
|||
}
|
||||
}
|
||||
if image == "" {
|
||||
return "", fmt.Errorf("cannot find current image version tag %s locally nor in any registry", current)
|
||||
return "", fmt.Errorf("unable to find current image version tag %s locally nor in any registry", current)
|
||||
}
|
||||
return image, nil
|
||||
}
|
||||
|
||||
// TagImage tags local image:current with stable version, and returns any error
|
||||
func TagImage(ctx context.Context, image, current, stable string) error {
|
||||
tag := exec.CommandContext(ctx, "docker", "tag", image+":"+current, image+":"+stable)
|
||||
if err := RunWithRetryNotify(ctx, tag, nil, 1*time.Second, 10); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -39,13 +39,13 @@ import (
|
|||
"text/template"
|
||||
"time"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"github.com/cenkalti/backoff/v4"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
// FSRoot is relative (to scripts in subfolders) root folder of local filesystem repo to update
|
||||
// FSRoot is a relative (to scripts in subfolders) root folder of local filesystem repo to update
|
||||
FSRoot = "../../../"
|
||||
)
|
||||
|
||||
|
|
@ -56,9 +56,11 @@ var (
|
|||
// init klog and check general requirements
|
||||
func init() {
|
||||
klog.InitFlags(nil)
|
||||
// write log statements to stderr instead of to files
|
||||
if err := flag.Set("logtostderr", "true"); err != nil {
|
||||
fmt.Printf("Error setting 'logtostderr' klog flag: %v\n", err)
|
||||
if err := flag.Set("logtostderr", "false"); err != nil {
|
||||
klog.Warningf("Unable to set flag value for logtostderr: %v", err)
|
||||
}
|
||||
if err := flag.Set("alsologtostderr", "true"); err != nil {
|
||||
klog.Warningf("Unable to set flag value for alsologtostderr: %v", err)
|
||||
}
|
||||
flag.Parse()
|
||||
defer klog.Flush()
|
||||
|
|
@ -72,21 +74,19 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
// Item defines Content where all occurrences of each Replace map key, corresponding to
|
||||
// GitHub TreeEntry.Path and/or local filesystem repo file path (prefixed with FSRoot),
|
||||
// would be swapped with its respective actual map value (having placeholders replaced with data),
|
||||
// creating a concrete update plan.
|
||||
// Replace map keys can use RegExp and map values can use Golang Text Template
|
||||
// Item defines Content where all occurrences of each Replace map key,
|
||||
// corresponding to GitHub TreeEntry.Path and/or local filesystem repo file path (prefixed with FSRoot),
|
||||
// would be swapped with its respective actual map value (having placeholders replaced with data), creating a concrete update plan.
|
||||
// Replace map keys can use RegExp and map values can use Golang Text Template.
|
||||
type Item struct {
|
||||
Content []byte `json:"-"`
|
||||
Replace map[string]string `json:"replace"`
|
||||
}
|
||||
|
||||
// apply updates Item Content by replacing all occurrences of Replace map's keys
|
||||
// with their actual map values (with placeholders replaced with data))
|
||||
// apply updates Item Content by replacing all occurrences of Replace map's keys with their actual map values (with placeholders replaced with data).
|
||||
func (i *Item) apply(data interface{}) (changed bool, err error) {
|
||||
if i.Content == nil || i.Replace == nil {
|
||||
return false, fmt.Errorf("want something, got nothing to update")
|
||||
return false, fmt.Errorf("unable to update content: nothing to update")
|
||||
}
|
||||
org := string(i.Content)
|
||||
str := org
|
||||
|
|
@ -108,18 +108,18 @@ func (i *Item) apply(data interface{}) (changed bool, err error) {
|
|||
func Apply(ctx context.Context, schema map[string]Item, data interface{}, prBranchPrefix, prTitle string, prIssue int) {
|
||||
plan, err := GetPlan(schema, data)
|
||||
if err != nil {
|
||||
klog.Fatalf("Error parsing schema: %v\n%s", err, plan)
|
||||
klog.Fatalf("Unable to parse schema: %v\n%s", err, plan)
|
||||
}
|
||||
klog.Infof("The Plan:\n%s", plan)
|
||||
|
||||
if target == "fs" || target == "all" {
|
||||
changed, err := fsUpdate(FSRoot, schema, data)
|
||||
if err != nil {
|
||||
klog.Errorf("Error updating local repo: %v", err)
|
||||
klog.Errorf("Unable to update local repo: %v", err)
|
||||
} else if !changed {
|
||||
klog.Infof("Local repo update skipped: nothing changed")
|
||||
} else {
|
||||
klog.Infof("Local repo updated")
|
||||
klog.Infof("Local repo successfully updated")
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -128,32 +128,31 @@ func Apply(ctx context.Context, schema map[string]Item, data interface{}, prBran
|
|||
tmpl := template.Must(template.New("prTitle").Parse(prTitle))
|
||||
buf := new(bytes.Buffer)
|
||||
if err := tmpl.Execute(buf, data); err != nil {
|
||||
klog.Fatalf("Error parsing PR Title: %v", err)
|
||||
klog.Fatalf("Unable to parse PR Title: %v", err)
|
||||
}
|
||||
prTitle = buf.String()
|
||||
|
||||
// check if PR already exists
|
||||
prURL, err := ghFindPR(ctx, prTitle, ghOwner, ghRepo, ghBase, ghToken)
|
||||
if err != nil {
|
||||
klog.Errorf("Error checking if PR already exists: %v", err)
|
||||
klog.Errorf("Unable to check if PR already exists: %v", err)
|
||||
} else if prURL != "" {
|
||||
klog.Infof("PR create skipped: already exists (%s)", prURL)
|
||||
} else {
|
||||
// create PR
|
||||
pr, err := ghCreatePR(ctx, ghOwner, ghRepo, ghBase, prBranchPrefix, prTitle, prIssue, ghToken, schema, data)
|
||||
if err != nil {
|
||||
klog.Fatalf("Error creating PR: %v", err)
|
||||
klog.Fatalf("Unable to create PR: %v", err)
|
||||
} else if pr == nil {
|
||||
klog.Infof("PR create skipped: nothing changed")
|
||||
} else {
|
||||
klog.Infof("PR created: %s", *pr.HTMLURL)
|
||||
klog.Infof("PR successfully created: %s", *pr.HTMLURL)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetPlan returns concrete plan replacing placeholders in schema with actual data values,
|
||||
// returns JSON-formatted representation of the plan and any error
|
||||
// GetPlan returns concrete plan replacing placeholders in schema with actual data values, returns JSON-formatted representation of the plan and any error occurred.
|
||||
func GetPlan(schema map[string]Item, data interface{}) (prettyprint string, err error) {
|
||||
for _, item := range schema {
|
||||
for src, dst := range item.Replace {
|
||||
|
|
|
|||
|
|
@ -19,8 +19,10 @@ package gcpauth
|
|||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"strconv"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
|
@ -63,6 +65,22 @@ func enableAddon(cfg *config.ClusterConfig) error {
|
|||
exit.Message(reason.InternalCredsNotFound, "Could not find any GCP credentials. Either run `gcloud auth application-default login` or set the GOOGLE_APPLICATION_CREDENTIALS environment variable to the path of your credentials file.")
|
||||
}
|
||||
|
||||
if creds.JSON == nil {
|
||||
// Cloud Shell sends credential files to an unusual location, let's check that location
|
||||
// For example, CLOUDSDK_CONFIG=/tmp/tmp.cflmvysoQE
|
||||
if e := os.Getenv("CLOUDSDK_CONFIG"); e != "" {
|
||||
credFile := path.Join(e, "application_default_credentials.json")
|
||||
b, err := ioutil.ReadFile(credFile)
|
||||
if err != nil {
|
||||
exit.Message(reason.InternalCredsNotFound, "Could not find any GCP credentials. Either run `gcloud auth application-default login` or set the GOOGLE_APPLICATION_CREDENTIALS environment variable to the path of your credentials file.")
|
||||
}
|
||||
creds.JSON = b
|
||||
} else {
|
||||
// We don't currently support authentication through the metadata server
|
||||
exit.Message(reason.InternalCredsNotFound, "Could not find any GCP credentials. Either run `gcloud auth application-default login` or set the GOOGLE_APPLICATION_CREDENTIALS environment variable to the path of your credentials file.")
|
||||
}
|
||||
}
|
||||
|
||||
f := assets.NewMemoryAssetTarget(creds.JSON, credentialsPath, "0444")
|
||||
|
||||
err = r.Copy(f)
|
||||
|
|
|
|||
|
|
@ -24,9 +24,9 @@ import (
|
|||
|
||||
const (
|
||||
// Version is the current version of kic
|
||||
Version = "v0.0.13"
|
||||
Version = "v0.0.14-snapshot2"
|
||||
// SHA of the kic base image
|
||||
baseImageSHA = "4d43acbd0050148d4bc399931f1b15253b5e73815b63a67b8ab4a5c9e523403f"
|
||||
baseImageSHA = "2bd97b482faf5b6a403ac39dd5e7c6fe2006425c6663a12f94f64f5f81a7787e"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
|
|||
|
|
@ -110,6 +110,8 @@ func (d *Driver) PreCommandCheck() error {
|
|||
if err != nil {
|
||||
return errors.Wrap(err, "error connecting to libvirt socket. Have you added yourself to the libvirtd group?")
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
libVersion, err := conn.GetLibVersion()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "getting libvirt version")
|
||||
|
|
@ -240,14 +242,6 @@ func (d *Driver) Restart() error {
|
|||
|
||||
// Start a host
|
||||
func (d *Driver) Start() (err error) {
|
||||
// if somebody/something deleted the network in the meantime,
|
||||
// we might need to recreate it. It's (nearly) a noop if the network exists.
|
||||
log.Info("Creating network...")
|
||||
err = d.createNetwork()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "creating network")
|
||||
}
|
||||
|
||||
// this call ensures that all networks are active
|
||||
log.Info("Ensuring networks are active...")
|
||||
err = d.ensureNetwork()
|
||||
|
|
@ -490,3 +484,14 @@ func (d *Driver) undefineDomain(conn *libvirt.Connect, dom *libvirt.Domain) erro
|
|||
|
||||
return dom.Undefine()
|
||||
}
|
||||
|
||||
// lvErr will return libvirt Error struct containing specific libvirt error code, domain, message and level
|
||||
func lvErr(err error) libvirt.Error {
|
||||
if err != nil {
|
||||
if lverr, ok := err.(libvirt.Error); ok {
|
||||
return lverr
|
||||
}
|
||||
return libvirt.Error{Code: libvirt.ERR_INTERNAL_ERROR, Message: "internal error"}
|
||||
}
|
||||
return libvirt.Error{Code: libvirt.ERR_OK, Message: ""}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -26,10 +26,12 @@ import (
|
|||
"io/ioutil"
|
||||
"strings"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/docker/machine/libmachine/log"
|
||||
libvirt "github.com/libvirt/libvirt-go"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/minikube/pkg/util/retry"
|
||||
)
|
||||
|
||||
// Replace with hardcoded range with CIDR
|
||||
|
|
@ -53,6 +55,7 @@ func setupNetwork(conn *libvirt.Connect, name string) error {
|
|||
if err != nil {
|
||||
return errors.Wrapf(err, "checking network %s", name)
|
||||
}
|
||||
defer func() { _ = n.Free() }()
|
||||
|
||||
// always ensure autostart is set on the network
|
||||
autostart, err := n.GetAutostart()
|
||||
|
|
@ -75,7 +78,6 @@ func setupNetwork(conn *libvirt.Connect, name string) error {
|
|||
return errors.Wrapf(err, "starting network %s", name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -99,8 +101,21 @@ func (d *Driver) ensureNetwork() error {
|
|||
|
||||
// Start the private network
|
||||
log.Infof("Ensuring network %s is active", d.PrivateNetwork)
|
||||
// retry once to recreate the network, but only if is not used by another minikube instance
|
||||
if err := setupNetwork(conn, d.PrivateNetwork); err != nil {
|
||||
return err
|
||||
log.Debugf("Network %s is inoperable, will try to recreate it: %v", d.PrivateNetwork, err)
|
||||
if err := d.deleteNetwork(); err != nil {
|
||||
return errors.Wrapf(err, "deleting inoperable network %s", d.PrivateNetwork)
|
||||
}
|
||||
log.Debugf("Successfully deleted %s network", d.PrivateNetwork)
|
||||
if err := d.createNetwork(); err != nil {
|
||||
return errors.Wrapf(err, "recreating inoperable network %s", d.PrivateNetwork)
|
||||
}
|
||||
log.Debugf("Successfully recreated %s network", d.PrivateNetwork)
|
||||
if err := setupNetwork(conn, d.PrivateNetwork); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Successfully activated %s network", d.PrivateNetwork)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -120,13 +135,16 @@ func (d *Driver) createNetwork() error {
|
|||
|
||||
// network: default
|
||||
// It is assumed that the libvirt/kvm installation has already created this network
|
||||
if _, err := conn.LookupNetworkByName(d.Network); err != nil {
|
||||
netd, err := conn.LookupNetworkByName(d.Network)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "network %s doesn't exist", d.Network)
|
||||
}
|
||||
defer func() { _ = netd.Free() }()
|
||||
|
||||
// network: private
|
||||
// Only create the private network if it does not already exist
|
||||
if _, err := conn.LookupNetworkByName(d.PrivateNetwork); err != nil {
|
||||
netp, err := conn.LookupNetworkByName(d.PrivateNetwork)
|
||||
if err != nil {
|
||||
// create the XML for the private network from our networkTmpl
|
||||
tmpl := template.Must(template.New("network").Parse(networkTmpl))
|
||||
var networkXML bytes.Buffer
|
||||
|
|
@ -141,10 +159,26 @@ func (d *Driver) createNetwork() error {
|
|||
}
|
||||
|
||||
// and finally create it
|
||||
if err := network.Create(); err != nil {
|
||||
log.Debugf("Trying to create network %s...", d.PrivateNetwork)
|
||||
create := func() error {
|
||||
if err := network.Create(); err != nil {
|
||||
return err
|
||||
}
|
||||
active, err := network.IsActive()
|
||||
if err == nil && active {
|
||||
return nil
|
||||
}
|
||||
return errors.Errorf("retrying %v", err)
|
||||
}
|
||||
if err := retry.Local(create, 10*time.Second); err != nil {
|
||||
return errors.Wrapf(err, "creating network %s", d.PrivateNetwork)
|
||||
}
|
||||
}
|
||||
defer func() {
|
||||
if netp != nil {
|
||||
_ = netp.Free()
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
@ -163,13 +197,13 @@ func (d *Driver) deleteNetwork() error {
|
|||
log.Debugf("Checking if network %s exists...", d.PrivateNetwork)
|
||||
network, err := conn.LookupNetworkByName(d.PrivateNetwork)
|
||||
if err != nil {
|
||||
if libvirtErr, ok := err.(libvirt.Error); ok && libvirtErr.Code == libvirt.ERR_NO_NETWORK {
|
||||
if lvErr(err).Code == libvirt.ERR_NO_NETWORK {
|
||||
log.Warnf("Network %s does not exist. Skipping deletion", d.PrivateNetwork)
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.Wrapf(err, "failed looking for network %s", d.PrivateNetwork)
|
||||
}
|
||||
defer func() { _ = network.Free() }()
|
||||
log.Debugf("Network %s exists", d.PrivateNetwork)
|
||||
|
||||
err = d.checkDomains(conn)
|
||||
|
|
@ -178,15 +212,58 @@ func (d *Driver) deleteNetwork() error {
|
|||
}
|
||||
|
||||
// when we reach this point, it means it is safe to delete the network
|
||||
log.Debugf("Trying to destroy network %s...", d.PrivateNetwork)
|
||||
err = network.Destroy()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "network destroy")
|
||||
|
||||
// cannot destroy an inactive network - try to activate it first
|
||||
log.Debugf("Trying to reactivate network %s first (if needed)...", d.PrivateNetwork)
|
||||
activate := func() error {
|
||||
active, err := network.IsActive()
|
||||
if err == nil && active {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// inactive, try to activate
|
||||
if err := network.Create(); err != nil {
|
||||
return err
|
||||
}
|
||||
return errors.Errorf("needs confirmation") // confirm in the next cycle
|
||||
}
|
||||
if err := retry.Local(activate, 10*time.Second); err != nil {
|
||||
log.Debugf("Reactivating network %s failed, will continue anyway...", d.PrivateNetwork)
|
||||
}
|
||||
|
||||
log.Debugf("Trying to destroy network %s...", d.PrivateNetwork)
|
||||
destroy := func() error {
|
||||
if err := network.Destroy(); err != nil {
|
||||
return err
|
||||
}
|
||||
active, err := network.IsActive()
|
||||
if err == nil && !active {
|
||||
return nil
|
||||
}
|
||||
return errors.Errorf("retrying %v", err)
|
||||
}
|
||||
if err := retry.Local(destroy, 10*time.Second); err != nil {
|
||||
return errors.Wrap(err, "destroying network")
|
||||
}
|
||||
|
||||
log.Debugf("Trying to undefine network %s...", d.PrivateNetwork)
|
||||
err = network.Undefine()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "network undefine")
|
||||
undefine := func() error {
|
||||
if err := network.Undefine(); err != nil {
|
||||
return err
|
||||
}
|
||||
netp, err := conn.LookupNetworkByName(d.PrivateNetwork)
|
||||
if netp != nil {
|
||||
_ = netp.Free()
|
||||
}
|
||||
if lvErr(err).Code == libvirt.ERR_NO_NETWORK {
|
||||
return nil
|
||||
}
|
||||
return errors.Errorf("retrying %v", err)
|
||||
}
|
||||
if err := retry.Local(undefine, 10*time.Second); err != nil {
|
||||
return errors.Wrap(err, "undefining network")
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -272,7 +349,6 @@ func (d *Driver) lookupIP() (string, error) {
|
|||
if err != nil {
|
||||
return "", errors.Wrap(err, "getting connection and domain")
|
||||
}
|
||||
|
||||
defer conn.Close()
|
||||
|
||||
libVersion, err := conn.GetLibVersion()
|
||||
|
|
@ -294,6 +370,7 @@ func (d *Driver) lookupIPFromStatusFile(conn *libvirt.Connect) (string, error) {
|
|||
if err != nil {
|
||||
return "", errors.Wrap(err, "looking up network by name")
|
||||
}
|
||||
defer func() { _ = network.Free() }()
|
||||
|
||||
bridge, err := network.GetBridgeName()
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -67,7 +67,7 @@ var Addons = map[string]*Addon{
|
|||
MustBinAsset("deploy/addons/dashboard/dashboard-clusterrole.yaml", vmpath.GuestAddonsDir, "dashboard-clusterrole.yaml", "0640", false),
|
||||
MustBinAsset("deploy/addons/dashboard/dashboard-clusterrolebinding.yaml", vmpath.GuestAddonsDir, "dashboard-clusterrolebinding.yaml", "0640", false),
|
||||
MustBinAsset("deploy/addons/dashboard/dashboard-configmap.yaml", vmpath.GuestAddonsDir, "dashboard-configmap.yaml", "0640", false),
|
||||
MustBinAsset("deploy/addons/dashboard/dashboard-dp.yaml", vmpath.GuestAddonsDir, "dashboard-dp.yaml", "0640", false),
|
||||
MustBinAsset("deploy/addons/dashboard/dashboard-dp.yaml.tmpl", vmpath.GuestAddonsDir, "dashboard-dp.yaml", "0640", true),
|
||||
MustBinAsset("deploy/addons/dashboard/dashboard-role.yaml", vmpath.GuestAddonsDir, "dashboard-role.yaml", "0640", false),
|
||||
MustBinAsset("deploy/addons/dashboard/dashboard-rolebinding.yaml", vmpath.GuestAddonsDir, "dashboard-rolebinding.yaml", "0640", false),
|
||||
MustBinAsset("deploy/addons/dashboard/dashboard-sa.yaml", vmpath.GuestAddonsDir, "dashboard-sa.yaml", "0640", false),
|
||||
|
|
@ -110,13 +110,13 @@ var Addons = map[string]*Addon{
|
|||
vmpath.GuestAddonsDir,
|
||||
"glusterfs-daemonset.yaml",
|
||||
"0640",
|
||||
false),
|
||||
true),
|
||||
MustBinAsset(
|
||||
"deploy/addons/storage-provisioner-gluster/heketi-deployment.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"heketi-deployment.yaml",
|
||||
"0640",
|
||||
false),
|
||||
true),
|
||||
MustBinAsset(
|
||||
"deploy/addons/storage-provisioner-gluster/storage-provisioner-glusterfile.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
|
|
@ -154,7 +154,7 @@ var Addons = map[string]*Addon{
|
|||
vmpath.GuestAddonsDir,
|
||||
"kibana-rc.yaml",
|
||||
"0640",
|
||||
false),
|
||||
true),
|
||||
MustBinAsset(
|
||||
"deploy/addons/efk/kibana-svc.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
|
|
@ -246,7 +246,7 @@ var Addons = map[string]*Addon{
|
|||
vmpath.GuestAddonsDir,
|
||||
"registry-rc.yaml",
|
||||
"0640",
|
||||
false),
|
||||
true),
|
||||
MustBinAsset(
|
||||
"deploy/addons/registry/registry-svc.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
|
|
@ -258,7 +258,7 @@ var Addons = map[string]*Addon{
|
|||
vmpath.GuestAddonsDir,
|
||||
"registry-proxy.yaml",
|
||||
"0640",
|
||||
false),
|
||||
true),
|
||||
}, false, "registry"),
|
||||
"registry-creds": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
|
|
@ -292,13 +292,13 @@ var Addons = map[string]*Addon{
|
|||
vmpath.GuestAddonsDir,
|
||||
"node-etc-hosts-update.yaml",
|
||||
"0640",
|
||||
false),
|
||||
true),
|
||||
MustBinAsset(
|
||||
"deploy/addons/registry-aliases/patch-coredns-job.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"patch-coredns-job.yaml",
|
||||
"0640",
|
||||
false),
|
||||
true),
|
||||
}, false, "registry-aliases"),
|
||||
"freshpod": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
|
|
@ -318,11 +318,11 @@ var Addons = map[string]*Addon{
|
|||
}, false, "nvidia-driver-installer"),
|
||||
"nvidia-gpu-device-plugin": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/gpu/nvidia-gpu-device-plugin.yaml",
|
||||
"deploy/addons/gpu/nvidia-gpu-device-plugin.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"nvidia-gpu-device-plugin.yaml",
|
||||
"0640",
|
||||
false),
|
||||
true),
|
||||
}, false, "nvidia-gpu-device-plugin"),
|
||||
"logviewer": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
|
|
@ -380,19 +380,19 @@ var Addons = map[string]*Addon{
|
|||
}, false, "helm-tiller"),
|
||||
"ingress-dns": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/ingress-dns/ingress-dns-pod.yaml",
|
||||
"deploy/addons/ingress-dns/ingress-dns-pod.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"ingress-dns-pod.yaml",
|
||||
"0640",
|
||||
false),
|
||||
true),
|
||||
}, false, "ingress-dns"),
|
||||
"metallb": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/metallb/metallb.yaml",
|
||||
"deploy/addons/metallb/metallb.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"metallb.yaml",
|
||||
"0640",
|
||||
false),
|
||||
true),
|
||||
MustBinAsset(
|
||||
"deploy/addons/metallb/metallb-config.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
|
|
@ -434,11 +434,11 @@ var Addons = map[string]*Addon{
|
|||
"0640",
|
||||
false),
|
||||
MustBinAsset(
|
||||
"deploy/addons/gcp-auth/gcp-auth-webhook.yaml",
|
||||
"deploy/addons/gcp-auth/gcp-auth-webhook.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"gcp-auth-webhook.yaml",
|
||||
"0640",
|
||||
false),
|
||||
true),
|
||||
}, false, "gcp-auth"),
|
||||
"volumesnapshots": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
|
|
@ -466,11 +466,11 @@ var Addons = map[string]*Addon{
|
|||
"0640",
|
||||
false),
|
||||
MustBinAsset(
|
||||
"deploy/addons/volumesnapshots/volume-snapshot-controller-deployment.yaml",
|
||||
"deploy/addons/volumesnapshots/volume-snapshot-controller-deployment.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"volume-snapshot-controller-deployment.yaml",
|
||||
"0640",
|
||||
false),
|
||||
true),
|
||||
}, false, "volumesnapshots"),
|
||||
"csi-hostpath-driver": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
|
|
|
|||
|
|
@ -145,7 +145,7 @@ func storageProvisioner(mirror string) string {
|
|||
// dashboardFrontend returns the image used for the dashboard frontend
|
||||
func dashboardFrontend(repo string) string {
|
||||
if repo == "" {
|
||||
repo = "kubernetesui"
|
||||
repo = "docker.io/kubernetesui"
|
||||
}
|
||||
// See 'kubernetes-dashboard' in deploy/addons/dashboard/dashboard-dp.yaml
|
||||
return path.Join(repo, "dashboard:v2.0.3")
|
||||
|
|
@ -154,7 +154,7 @@ func dashboardFrontend(repo string) string {
|
|||
// dashboardMetrics returns the image used for the dashboard metrics scraper
|
||||
func dashboardMetrics(repo string) string {
|
||||
if repo == "" {
|
||||
repo = "kubernetesui"
|
||||
repo = "docker.io/kubernetesui"
|
||||
}
|
||||
// See 'dashboard-metrics-scraper' in deploy/addons/dashboard/dashboard-dp.yaml
|
||||
return path.Join(repo, "metrics-scraper:v1.0.4")
|
||||
|
|
|
|||
|
|
@ -25,8 +25,8 @@ import (
|
|||
func TestAuxiliary(t *testing.T) {
|
||||
want := []string{
|
||||
"gcr.io/k8s-minikube/storage-provisioner:v3",
|
||||
"kubernetesui/dashboard:v2.0.3",
|
||||
"kubernetesui/metrics-scraper:v1.0.4",
|
||||
"docker.io/kubernetesui/dashboard:v2.0.3",
|
||||
"docker.io/kubernetesui/metrics-scraper:v1.0.4",
|
||||
}
|
||||
got := auxiliary("")
|
||||
if diff := cmp.Diff(want, got); diff != "" {
|
||||
|
|
|
|||
|
|
@ -38,8 +38,8 @@ func TestKubeadmImages(t *testing.T) {
|
|||
"k8s.gcr.io/etcd:3.4.3-0",
|
||||
"k8s.gcr.io/pause:3.1",
|
||||
"gcr.io/k8s-minikube/storage-provisioner:v3",
|
||||
"kubernetesui/dashboard:v2.0.3",
|
||||
"kubernetesui/metrics-scraper:v1.0.4",
|
||||
"docker.io/kubernetesui/dashboard:v2.0.3",
|
||||
"docker.io/kubernetesui/metrics-scraper:v1.0.4",
|
||||
}},
|
||||
{"v1.16.1", "mirror.k8s.io", []string{
|
||||
"mirror.k8s.io/kube-proxy:v1.16.1",
|
||||
|
|
@ -62,8 +62,8 @@ func TestKubeadmImages(t *testing.T) {
|
|||
"k8s.gcr.io/etcd:3.3.10",
|
||||
"k8s.gcr.io/pause:3.1",
|
||||
"gcr.io/k8s-minikube/storage-provisioner:v3",
|
||||
"kubernetesui/dashboard:v2.0.3",
|
||||
"kubernetesui/metrics-scraper:v1.0.4",
|
||||
"docker.io/kubernetesui/dashboard:v2.0.3",
|
||||
"docker.io/kubernetesui/metrics-scraper:v1.0.4",
|
||||
}},
|
||||
{"v1.14.0", "", []string{
|
||||
"k8s.gcr.io/kube-proxy:v1.14.0",
|
||||
|
|
@ -74,8 +74,8 @@ func TestKubeadmImages(t *testing.T) {
|
|||
"k8s.gcr.io/etcd:3.3.10",
|
||||
"k8s.gcr.io/pause:3.1",
|
||||
"gcr.io/k8s-minikube/storage-provisioner:v3",
|
||||
"kubernetesui/dashboard:v2.0.3",
|
||||
"kubernetesui/metrics-scraper:v1.0.4",
|
||||
"docker.io/kubernetesui/dashboard:v2.0.3",
|
||||
"docker.io/kubernetesui/metrics-scraper:v1.0.4",
|
||||
}},
|
||||
{"v1.13.0", "", []string{
|
||||
"k8s.gcr.io/kube-proxy:v1.13.0",
|
||||
|
|
@ -86,8 +86,8 @@ func TestKubeadmImages(t *testing.T) {
|
|||
"k8s.gcr.io/etcd:3.2.24",
|
||||
"k8s.gcr.io/pause:3.1",
|
||||
"gcr.io/k8s-minikube/storage-provisioner:v3",
|
||||
"kubernetesui/dashboard:v2.0.3",
|
||||
"kubernetesui/metrics-scraper:v1.0.4",
|
||||
"docker.io/kubernetesui/dashboard:v2.0.3",
|
||||
"docker.io/kubernetesui/metrics-scraper:v1.0.4",
|
||||
}},
|
||||
{"v1.12.0", "", []string{
|
||||
"k8s.gcr.io/kube-proxy:v1.12.0",
|
||||
|
|
@ -98,8 +98,8 @@ func TestKubeadmImages(t *testing.T) {
|
|||
"k8s.gcr.io/etcd:3.2.24",
|
||||
"k8s.gcr.io/pause:3.1",
|
||||
"gcr.io/k8s-minikube/storage-provisioner:v3",
|
||||
"kubernetesui/dashboard:v2.0.3",
|
||||
"kubernetesui/metrics-scraper:v1.0.4",
|
||||
"docker.io/kubernetesui/dashboard:v2.0.3",
|
||||
"docker.io/kubernetesui/metrics-scraper:v1.0.4",
|
||||
}},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
|
|
|
|||
|
|
@ -71,6 +71,7 @@ type ClusterConfig struct {
|
|||
Addons map[string]bool
|
||||
VerifyComponents map[string]bool // map of components to verify and wait for after start.
|
||||
StartHostTimeout time.Duration
|
||||
ScheduledStop *ScheduledStopConfig
|
||||
ExposedPorts []string // Only used by the docker and podman driver
|
||||
}
|
||||
|
||||
|
|
@ -78,6 +79,7 @@ type ClusterConfig struct {
|
|||
type KubernetesConfig struct {
|
||||
KubernetesVersion string
|
||||
ClusterName string
|
||||
Namespace string
|
||||
APIServerName string
|
||||
APIServerNames []string
|
||||
APIServerIPs []net.IP
|
||||
|
|
@ -137,3 +139,10 @@ type VersionedExtraOption struct {
|
|||
// flag is applied to
|
||||
GreaterThanOrEqual semver.Version
|
||||
}
|
||||
|
||||
// ScheduledStopConfig contains information around scheduled stop
|
||||
// not yet used, will be used to show status of scheduled stop
|
||||
type ScheduledStopConfig struct {
|
||||
InitiationTime int64
|
||||
Duration time.Duration
|
||||
}
|
||||
|
|
|
|||
|
|
@ -68,6 +68,10 @@ const (
|
|||
MinikubeActiveDockerdEnv = "MINIKUBE_ACTIVE_DOCKERD"
|
||||
// PodmanVarlinkBridgeEnv is used for podman settings
|
||||
PodmanVarlinkBridgeEnv = "PODMAN_VARLINK_BRIDGE"
|
||||
// PodmanContainerHostEnv is used for podman settings
|
||||
PodmanContainerHostEnv = "CONTAINER_HOST"
|
||||
// PodmanContainerSSHKeyEnv is used for podman settings
|
||||
PodmanContainerSSHKeyEnv = "CONTAINER_SSHKEY"
|
||||
// MinikubeActivePodmanEnv holds the podman service that the user's shell is pointing at
|
||||
// value would be profile or empty if pointing to the user's host.
|
||||
MinikubeActivePodmanEnv = "MINIKUBE_ACTIVE_PODMAN"
|
||||
|
|
|
|||
|
|
@ -375,6 +375,7 @@ func dockerImagesPreloaded(runner command.Runner, images []string) bool {
|
|||
}
|
||||
preloadedImages := map[string]struct{}{}
|
||||
for _, i := range strings.Split(rr.Stdout.String(), "\n") {
|
||||
i = trimDockerIO(i)
|
||||
preloadedImages[i] = struct{}{}
|
||||
}
|
||||
|
||||
|
|
@ -382,6 +383,7 @@ func dockerImagesPreloaded(runner command.Runner, images []string) bool {
|
|||
|
||||
// Make sure images == imgs
|
||||
for _, i := range images {
|
||||
i = trimDockerIO(i)
|
||||
if _, ok := preloadedImages[i]; !ok {
|
||||
klog.Infof("%s wasn't preloaded", i)
|
||||
return false
|
||||
|
|
@ -390,6 +392,13 @@ func dockerImagesPreloaded(runner command.Runner, images []string) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// Remove docker.io prefix since it won't be included in images names
|
||||
// when we call 'docker images'
|
||||
func trimDockerIO(name string) string {
|
||||
name = strings.TrimPrefix(name, "docker.io/")
|
||||
return name
|
||||
}
|
||||
|
||||
func dockerBoundToContainerd(runner command.Runner) bool {
|
||||
// NOTE: assumes systemd
|
||||
rr, err := runner.RunCmd(exec.Command("sudo", "systemctl", "cat", "docker.service"))
|
||||
|
|
|
|||
|
|
@ -29,10 +29,6 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
// initflag must be imported before any other minikube pkg.
|
||||
// Fix for https://github.com/kubernetes/minikube/issues/4866
|
||||
_ "k8s.io/minikube/pkg/initflag"
|
||||
|
||||
"github.com/golang-collections/collections/stack"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/minikube/pkg/util/lock"
|
||||
|
|
|
|||
|
|
@ -33,6 +33,9 @@ type Settings struct {
|
|||
// The name of the cluster for this context
|
||||
ClusterName string
|
||||
|
||||
// The name of the namespace for this context
|
||||
Namespace string
|
||||
|
||||
// ClusterServerAddress is the address of the Kubernetes cluster
|
||||
ClusterServerAddress string
|
||||
|
||||
|
|
@ -104,6 +107,7 @@ func PopulateFromSettings(cfg *Settings, apiCfg *api.Config) error {
|
|||
contextName := cfg.ClusterName
|
||||
context := api.NewContext()
|
||||
context.Cluster = cfg.ClusterName
|
||||
context.Namespace = cfg.Namespace
|
||||
context.AuthInfo = userName
|
||||
apiCfg.Contexts[contextName] = context
|
||||
|
||||
|
|
|
|||
|
|
@ -19,6 +19,7 @@ package localpath
|
|||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
|
@ -86,6 +87,11 @@ func ClientCert(name string) string {
|
|||
return new
|
||||
}
|
||||
|
||||
// PID returns the path to the pid file used by profile for scheduled stop
|
||||
func PID(profile string) string {
|
||||
return path.Join(Profile(profile), "pid")
|
||||
}
|
||||
|
||||
// ClientKey returns client certificate path, used by kubeconfig
|
||||
func ClientKey(name string) string {
|
||||
new := filepath.Join(Profile(name), "client.key")
|
||||
|
|
|
|||
|
|
@ -80,6 +80,7 @@ func trySSHPowerOff(h *host.Host) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
register.Reg.SetStep(register.PowerOff)
|
||||
out.T(style.Shutdown, `Powering off "{{.profile_name}}" via SSH ...`, out.V{"profile_name": h.Name})
|
||||
// differnet for kic because RunSSHCommand is not implemented by kic
|
||||
if driver.IsKIC(h.DriverName) {
|
||||
|
|
|
|||
|
|
@ -72,7 +72,7 @@ func Partial(name string, miniHome ...string) (libmachine.API, *config.ClusterCo
|
|||
cc, err := config.Load(name, miniHome...)
|
||||
if err != nil {
|
||||
if config.IsNotExist(err) {
|
||||
out.T(style.Shrug, `There is no local cluster named "{{.cluster}}"`, out.V{"cluster": name})
|
||||
out.T(style.Shrug, `Profile "{{.cluster}}" not found. Run "minikube profile list" to view all profiles.`, out.V{"cluster": name})
|
||||
exitTip("start", name, reason.ExGuestNotFound)
|
||||
}
|
||||
exit.Error(reason.HostConfigLoad, "Error getting cluster config", err)
|
||||
|
|
@ -174,6 +174,6 @@ func ExampleCmd(cname string, action string) string {
|
|||
// exitTip returns an action tip and exits
|
||||
func exitTip(action string, profile string, code int) {
|
||||
command := ExampleCmd(profile, action)
|
||||
out.T(style.Workaround, `To fix this, run: "{{.command}}"`, out.V{"command": command})
|
||||
out.T(style.Workaround, `To start a cluster, run: "{{.command}}"`, out.V{"command": command})
|
||||
os.Exit(code)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -37,6 +37,7 @@ import (
|
|||
"k8s.io/minikube/pkg/minikube/localpath"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
"k8s.io/minikube/pkg/minikube/out/register"
|
||||
"k8s.io/minikube/pkg/minikube/reason"
|
||||
"k8s.io/minikube/pkg/minikube/style"
|
||||
)
|
||||
|
|
@ -117,6 +118,7 @@ func beginDownloadKicBaseImage(g *errgroup.Group, cc *config.ClusterConfig, down
|
|||
}
|
||||
|
||||
klog.Infof("Beginning downloading kic base image for %s with %s", cc.Driver, cc.KubernetesConfig.ContainerRuntime)
|
||||
register.Reg.SetStep(register.PullingBaseImage)
|
||||
out.T(style.Pulling, "Pulling base image ...")
|
||||
g.Go(func() error {
|
||||
baseImg := cc.KicBaseImage
|
||||
|
|
|
|||
|
|
@ -318,6 +318,7 @@ func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clu
|
|||
}
|
||||
kcs := &kubeconfig.Settings{
|
||||
ClusterName: clusterName,
|
||||
Namespace: cc.KubernetesConfig.Namespace,
|
||||
ClusterServerAddress: addr,
|
||||
ClientCertificate: localpath.ClientCert(cc.Name),
|
||||
ClientKey: localpath.ClientKey(cc.Name),
|
||||
|
|
|
|||
|
|
@ -28,6 +28,7 @@ const (
|
|||
SelectingDriver RegStep = "Selecting Driver"
|
||||
DownloadingArtifacts RegStep = "Downloading Artifacts"
|
||||
StartingNode RegStep = "Starting Node"
|
||||
PullingBaseImage RegStep = "Pulling Base Image"
|
||||
RunningLocalhost RegStep = "Running on Localhost"
|
||||
LocalOSRelease RegStep = "Local OS Release"
|
||||
CreatingContainer RegStep = "Creating Container"
|
||||
|
|
@ -39,6 +40,7 @@ const (
|
|||
Done RegStep = "Done"
|
||||
|
||||
Stopping RegStep = "Stopping"
|
||||
PowerOff RegStep = "PowerOff"
|
||||
Deleting RegStep = "Deleting"
|
||||
Pausing RegStep = "Pausing"
|
||||
Unpausing RegStep = "Unpausing"
|
||||
|
|
@ -78,7 +80,7 @@ func init() {
|
|||
Done,
|
||||
},
|
||||
|
||||
Stopping: {Stopping, Done},
|
||||
Stopping: {Stopping, PowerOff, Done},
|
||||
Pausing: {Pausing, Done},
|
||||
Unpausing: {Unpausing, Done},
|
||||
Deleting: {Deleting, Stopping, Deleting, Done},
|
||||
|
|
@ -126,5 +128,3 @@ func (r *Register) SetStep(s RegStep) {
|
|||
|
||||
r.current = s
|
||||
}
|
||||
|
||||
// recordStep records the current step
|
||||
|
|
|
|||
|
|
@ -114,6 +114,7 @@ var (
|
|||
InternalYamlMarshal = Kind{ID: "MK_YAML_MARSHAL", ExitCode: ExProgramError}
|
||||
InternalCredsNotFound = Kind{ID: "MK_CREDENTIALS_NOT_FOUND", ExitCode: ExProgramNotFound, Style: style.Shrug}
|
||||
InternalSemverParse = Kind{ID: "MK_SEMVER_PARSE", ExitCode: ExProgramError}
|
||||
DaemonizeError = Kind{ID: "MK_DAEMONIZE", ExitCode: ExProgramError}
|
||||
|
||||
RsrcInsufficientCores = Kind{ID: "RSRC_INSUFFICIENT_CORES", ExitCode: ExInsufficientCores, Style: style.UnmetRequirement}
|
||||
RsrcInsufficientDarwinDockerCores = Kind{
|
||||
|
|
|
|||
|
|
@ -0,0 +1,91 @@
|
|||
// +build !windows
|
||||
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package schedule
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/VividCortex/godaemon"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/minikube/pkg/minikube/localpath"
|
||||
)
|
||||
|
||||
// KillExisting kills existing scheduled stops by looking up the PID
|
||||
// of the scheduled stop from the PID file saved for the profile and killing the process
|
||||
func KillExisting(profiles []string) {
|
||||
for _, profile := range profiles {
|
||||
if err := killPIDForProfile(profile); err != nil {
|
||||
klog.Errorf("error killng PID for profile %s: %v", profile, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func killPIDForProfile(profile string) error {
|
||||
file := localpath.PID(profile)
|
||||
f, err := ioutil.ReadFile(file)
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
defer func() {
|
||||
if err := os.Remove(file); err != nil {
|
||||
klog.Errorf("error deleting %s: %v, you may have to delete in manually", file, err)
|
||||
}
|
||||
}()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "reading %s", file)
|
||||
}
|
||||
pid, err := strconv.Atoi(string(f))
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "converting %s to int", f)
|
||||
}
|
||||
p, err := os.FindProcess(pid)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "finding process")
|
||||
}
|
||||
klog.Infof("killing process %v as it is an old scheduled stop", pid)
|
||||
if err := p.Kill(); err != nil {
|
||||
return errors.Wrapf(err, "killing %v", pid)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func daemonize(profiles []string, duration time.Duration) error {
|
||||
_, _, err := godaemon.MakeDaemon(&godaemon.DaemonAttr{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// now that this process has daemonized, it has a new PID
|
||||
pid := os.Getpid()
|
||||
return savePIDs(pid, profiles)
|
||||
}
|
||||
|
||||
func savePIDs(pid int, profiles []string) error {
|
||||
for _, p := range profiles {
|
||||
file := localpath.PID(p)
|
||||
if err := ioutil.WriteFile(file, []byte(fmt.Sprintf("%v", pid)), 0600); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,5 +1,7 @@
|
|||
// +build windows
|
||||
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors All rights reserved.
|
||||
Copyright 2020 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
|
@ -14,16 +16,20 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package initflag
|
||||
package schedule
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Workaround for "ERROR: logging before flag.Parse"
|
||||
// See: https://github.com/kubernetes/kubernetes/issues/17162
|
||||
fs := flag.NewFlagSet("", flag.ContinueOnError)
|
||||
_ = fs.Parse([]string{})
|
||||
flag.CommandLine = fs
|
||||
// KillExisting will kill existing scheduled stops
|
||||
func KillExisting(profiles []string) {
|
||||
klog.Errorf("not yet implemented for windows")
|
||||
}
|
||||
|
||||
func daemonize(profiles []string, duration time.Duration) error {
|
||||
return fmt.Errorf("not yet implemented for windows")
|
||||
}
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
/*
|
||||
Copyright 2020 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package schedule
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
)
|
||||
|
||||
// Daemonize daemonizes minikube so that scheduled stop happens as expected
|
||||
func Daemonize(profiles []string, duration time.Duration) error {
|
||||
// save current time and expected duration in config
|
||||
scheduledStop := &config.ScheduledStopConfig{
|
||||
InitiationTime: time.Now().Unix(),
|
||||
Duration: duration,
|
||||
}
|
||||
var daemonizeProfiles []string
|
||||
for _, p := range profiles {
|
||||
_, cc := mustload.Partial(p)
|
||||
if driver.BareMetal(cc.Driver) {
|
||||
out.WarningT("scheduled stop is not supported on the none driver, skipping scheduling")
|
||||
continue
|
||||
}
|
||||
daemonizeProfiles = append(daemonizeProfiles, p)
|
||||
cc.ScheduledStop = scheduledStop
|
||||
if err := config.SaveProfile(p, cc); err != nil {
|
||||
return errors.Wrap(err, "saving profile")
|
||||
}
|
||||
}
|
||||
|
||||
return daemonize(daemonizeProfiles, duration)
|
||||
}
|
||||
|
|
@ -104,7 +104,8 @@ func DetermineLocale() {
|
|||
|
||||
// setPreferredLanguageTag configures which language future messages should use.
|
||||
func setPreferredLanguageTag(l language.Tag) {
|
||||
klog.Infof("Setting Language to %s ...", l)
|
||||
// output message only if verbosity level is set and we still haven't got all the flags parsed in main()
|
||||
klog.V(1).Infof("Setting Language to %s ...", l)
|
||||
preferredLanguage = l
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -17,23 +17,19 @@ addons modifies minikube addons files using subcommands like "minikube addons en
|
|||
minikube addons SUBCOMMAND [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for addons
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -54,23 +50,19 @@ Configures the addon w/ADDON_NAME within minikube (example: minikube addons conf
|
|||
minikube addons configure ADDON_NAME [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for configure
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -91,23 +83,19 @@ Disables the addon w/ADDON_NAME within minikube (example: minikube addons disabl
|
|||
minikube addons disable ADDON_NAME [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for disable
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -128,23 +116,19 @@ Enables the addon w/ADDON_NAME within minikube (example: minikube addons enable
|
|||
minikube addons enable ADDON_NAME [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for enable
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -166,23 +150,19 @@ Simply type addons help [path to command] for full details.
|
|||
minikube addons help [command] [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for help
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -206,7 +186,6 @@ minikube addons list [flags]
|
|||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for list
|
||||
-o, --output string minikube addons list --output OUTPUT. json, list (default "list")
|
||||
```
|
||||
|
||||
|
|
@ -216,11 +195,13 @@ minikube addons list [flags]
|
|||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -245,7 +226,6 @@ minikube addons open ADDON_NAME [flags]
|
|||
|
||||
```
|
||||
--format string Format to output addons URL in. This format will be applied to each url individually and they will be printed one at a time. (default "http://{{.IP}}:{{.Port}}")
|
||||
-h, --help help for open
|
||||
--https Open the addons URL with https instead of http
|
||||
--interval int The time interval for each check that wait performs in seconds (default 1)
|
||||
--url Display the Kubernetes addons URL in the CLI instead of opening it in the default browser
|
||||
|
|
@ -258,11 +238,13 @@ minikube addons open ADDON_NAME [flags]
|
|||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
|
|||
|
|
@ -13,23 +13,19 @@ Add, delete, or push a local image into minikube
|
|||
|
||||
Add, delete, or push a local image into minikube
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for cache
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -50,23 +46,19 @@ Add an image to local cache.
|
|||
minikube cache add [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for add
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -87,23 +79,19 @@ Delete an image from the local cache.
|
|||
minikube cache delete [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for delete
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -125,23 +113,19 @@ Simply type cache help [path to command] for full details.
|
|||
minikube cache help [command] [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for help
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -167,7 +151,6 @@ minikube cache list [flags]
|
|||
```
|
||||
--format string Go template format string for the cache list output. The format for Go templates can be found here: https://golang.org/pkg/text/template/
|
||||
For the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate (default "{{.CacheImage}}\n")
|
||||
-h, --help help for list
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
|
@ -176,11 +159,13 @@ minikube cache list [flags]
|
|||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -201,23 +186,19 @@ reloads images previously added using the 'cache add' subcommand
|
|||
minikube cache reload [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for reload
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
|
|||
|
|
@ -38,23 +38,19 @@ Outputs minikube shell completion for the given shell (bash, zsh or fish)
|
|||
minikube completion SHELL [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for completion
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
|
|||
|
|
@ -47,23 +47,19 @@ Configurable fields:
|
|||
minikube config SUBCOMMAND [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for config
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -90,7 +86,6 @@ minikube config defaults PROPERTY_NAME [flags]
|
|||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for defaults
|
||||
--output string Output format. Accepted values: [json]
|
||||
```
|
||||
|
||||
|
|
@ -100,11 +95,13 @@ minikube config defaults PROPERTY_NAME [flags]
|
|||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -125,23 +122,19 @@ Returns the value of PROPERTY_NAME from the minikube config file. Can be overwr
|
|||
minikube config get PROPERTY_NAME [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for get
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -163,23 +156,19 @@ Simply type config help [path to command] for full details.
|
|||
minikube config help [command] [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for help
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -201,23 +190,19 @@ Sets the PROPERTY_NAME config value to PROPERTY_VALUE
|
|||
minikube config set PROPERTY_NAME PROPERTY_VALUE [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for set
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -238,23 +223,19 @@ unsets PROPERTY_NAME from the minikube config file. Can be overwritten by flags
|
|||
minikube config unset PROPERTY_NAME [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for unset
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -280,7 +261,6 @@ minikube config view [flags]
|
|||
```
|
||||
--format string Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/
|
||||
For the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate (default "- {{.ConfigKey}}: {{.ConfigValue}}\n")
|
||||
-h, --help help for view
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
|
@ -289,11 +269,13 @@ minikube config view [flags]
|
|||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
|
|||
|
|
@ -20,8 +20,7 @@ minikube dashboard [flags]
|
|||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for dashboard
|
||||
--url Display dashboard URL instead of opening a browser
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
|
@ -30,11 +29,13 @@ minikube dashboard [flags]
|
|||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
|
|||
|
|
@ -22,7 +22,6 @@ minikube delete [flags]
|
|||
|
||||
```
|
||||
--all Set flag to delete all profiles
|
||||
-h, --help help for delete
|
||||
--purge Set this flag to delete the '.minikube' folder from your user directory.
|
||||
```
|
||||
|
||||
|
|
@ -32,11 +31,13 @@ minikube delete [flags]
|
|||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
|
|||
|
|
@ -20,7 +20,6 @@ minikube docker-env [flags]
|
|||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for docker-env
|
||||
--no-proxy Add machine IP to NO_PROXY environment variable
|
||||
--shell string Force environment to be configured for a specified shell: [fish, cmd, powershell, tcsh, bash, zsh], default is auto-detect
|
||||
-u, --unset Unset variables instead of setting them
|
||||
|
|
@ -32,11 +31,13 @@ minikube docker-env [flags]
|
|||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
|
|||
|
|
@ -18,23 +18,19 @@ Simply type minikube help [path to command] for full details.
|
|||
minikube help [command] [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for help
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
|
|||
|
|
@ -17,23 +17,19 @@ Retrieves the IP address of the running cluster, and writes it to STDOUT.
|
|||
minikube ip [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for ip
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
|
|||
|
|
@ -21,23 +21,19 @@ minikube kubectl -- get pods --namespace kube-system
|
|||
minikube kubectl [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for kubectl
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
|
|||
|
|
@ -21,7 +21,6 @@ minikube logs [flags]
|
|||
|
||||
```
|
||||
-f, --follow Show only the most recent journal entries, and continuously print new entries as they are appended to the journal.
|
||||
-h, --help help for logs
|
||||
-n, --length int Number of lines back to go within the log (default 60)
|
||||
--node string The node to get logs from. Defaults to the primary control plane.
|
||||
--problems Show only log entries which point to known problems
|
||||
|
|
@ -33,11 +32,13 @@ minikube logs [flags]
|
|||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
|
|||
|
|
@ -22,7 +22,6 @@ minikube mount [flags] <source directory>:<target directory>
|
|||
```
|
||||
--9p-version string Specify the 9p version that the mount should use (default "9p2000.L")
|
||||
--gid string Default group id used for the mount (default "docker")
|
||||
-h, --help help for mount
|
||||
--ip string Specify the ip that the mount should be setup on
|
||||
--kill Kill the mount process spawned by minikube start
|
||||
--mode uint File permissions used for the mount (default 493)
|
||||
|
|
@ -38,11 +37,13 @@ minikube mount [flags] <source directory>:<target directory>
|
|||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
|
|||
|
|
@ -17,23 +17,19 @@ Operations on nodes
|
|||
minikube node [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for node
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -59,7 +55,6 @@ minikube node add [flags]
|
|||
```
|
||||
--control-plane If true, the node added will also be a control plane in addition to a worker.
|
||||
--delete-on-failure If set, delete the current cluster if start fails and try again. Defaults to false.
|
||||
-h, --help help for add
|
||||
--worker If true, the added node will be marked for work. Defaults to true. (default true)
|
||||
```
|
||||
|
||||
|
|
@ -69,11 +64,13 @@ minikube node add [flags]
|
|||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -94,23 +91,19 @@ Deletes a node from a cluster.
|
|||
minikube node delete [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for delete
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -132,23 +125,19 @@ Simply type node help [path to command] for full details.
|
|||
minikube node help [command] [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for help
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -169,23 +158,19 @@ List existing minikube nodes.
|
|||
minikube node list [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for list
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -210,7 +195,6 @@ minikube node start [flags]
|
|||
|
||||
```
|
||||
--delete-on-failure If set, delete the current cluster if start fails and try again. Defaults to false.
|
||||
-h, --help help for start
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
|
@ -219,11 +203,13 @@ minikube node start [flags]
|
|||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -244,23 +230,19 @@ Stops a node in a cluster.
|
|||
minikube node stop [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for stop
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@ minikube pause [flags]
|
|||
```
|
||||
-n, --namespaces strings namespaces to pause (default [kube-system,kubernetes-dashboard,storage-gluster,istio-operator])
|
||||
-A, --all-namespaces If set, pause all namespaces
|
||||
-h, --help help for pause
|
||||
-o, --output string Format to print stdout in. Options include: [text,json] (default "text")
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
|
@ -31,11 +31,13 @@ minikube pause [flags]
|
|||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
|
|||
|
|
@ -20,7 +20,6 @@ minikube podman-env [flags]
|
|||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for podman-env
|
||||
--shell string Force environment to be configured for a specified shell: [fish, cmd, powershell, tcsh, bash, zsh], default is auto-detect
|
||||
-u, --unset Unset variables instead of setting them
|
||||
```
|
||||
|
|
@ -31,11 +30,13 @@ minikube podman-env [flags]
|
|||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
|
|||
|
|
@ -17,23 +17,19 @@ profile sets the current minikube profile, or gets the current profile if no arg
|
|||
minikube profile [MINIKUBE_PROFILE_NAME]. You can return to the default minikube profile by running `minikube profile default` [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for profile
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -55,23 +51,19 @@ Simply type profile help [path to command] for full details.
|
|||
minikube profile help [command] [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for help
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
@ -95,7 +87,6 @@ minikube profile list [flags]
|
|||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for list
|
||||
-o, --output string The output format. One of 'json', 'table' (default "table")
|
||||
```
|
||||
|
||||
|
|
@ -105,11 +96,13 @@ minikube profile list [flags]
|
|||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
|
||||
-h, --help
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue