Merge branch 'master' into gh_7422-reuse_hyperkit_driver

commit a072e89d99
@@ -24,7 +24,7 @@ jobs:
           stable: true
       - name: Download Dependencies
         run: go mod download
-      - name: Install KVM
+      - name: Install KVM
         run: |
           sudo apt-get update
           sudo apt-get install -y qemu-kvm libvirt-daemon-system libvirt-clients bridge-utils

@@ -64,11 +64,6 @@ jobs:
           echo workspace $GITHUB_WORKSPACE
           echo "end of debug stuff"
           echo $(which jq)
-      # iso needs golang 1.11.3
-      - uses: actions/setup-go@v2
-        with:
-          go-version: '1.11.13'
-          stable: true
       - name: Build ISO
         run: |
           whoami

@@ -122,11 +117,14 @@ jobs:
           GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
           echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
           echo ::set-env name=STAT::${STAT}
-      - uses: actions/upload-artifact@v1
+      - name: Upload report
+        uses: actions/upload-artifact@v1
         with:
           name: iso_functional_test_kvm2_ubuntu
           path: out/report
-      - uses: actions/upload-artifact@v1
+      - name: Upload iso.log
+        if: always()
+        uses: actions/upload-artifact@v1
         with:
           name: iso log
           path: out/iso.log

@@ -146,4 +144,4 @@ jobs:
           if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
           if [ "$numPass" -eq 0 ];then echo "*** 0 Passed! ***";exit 2;fi
           if [ "$numPass" -lt 32 ];then echo "*** Failed to pass at least 32 ! ***";exit 2;fi
-          if [ "$numPass" -eq 0 ];then echo "*** Passed! ***";exit 0;fi
+          if [ "$numPass" -eq 0 ];then echo "*** Passed! ***";exit 0;fi
@@ -412,7 +412,7 @@ jobs:
          echo "---------------- ${numFail} Failures :( ----------------------------"
          echo $failedTests
          echo "-------------------------------------------------------"
-          If ($numFail -gt 0){ exit 2 }
+          If ($numFail -gt 0){ exit 2 }
          If ($numPass -eq 0){ exit 2 }
          If ($numPass -lt 33){ exit 2 }
          If ($numFail -eq 0){ exit 0 }

@@ -429,7 +429,7 @@ jobs:
        shell: powershell
        run: |
          echo $env:computerName
-          ls
+          ls
          $ErrorActionPreference = "SilentlyContinue"
          cd minikube_binaries
          ls

@@ -464,14 +464,14 @@ jobs:
            $docker_running = $?
          }
          Write-Output "Docker is running"
-          docker system prune -f
+          docker system prune -f
      - name: Info
        continue-on-error: true
        shell: powershell
        run: |
          $ErrorActionPreference = "SilentlyContinue"
          cd minikube_binaries
-          ls
+          ls
          echo $env:computername
          Get-WmiObject -class Win32_ComputerSystem
      - uses: actions/setup-go@v2

@@ -483,7 +483,7 @@ jobs:
        shell: powershell
        run: |
          $ErrorActionPreference = "SilentlyContinue"
-          (New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.2.4/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
+          (New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.2.4/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
          choco install -y kubernetes-cli
          choco install -y jq
          choco install -y caffeine

@@ -549,7 +549,7 @@ jobs:
          echo "---------------- ${numFail} Failures :( ----------------------------"
          echo $failedTests
          echo "-------------------------------------------------------"
-          If ($numFail -gt 0){ exit 2 }
+          If ($numFail -gt 0){ exit 2 }
          If ($numPass -eq 0){ exit 2 }
          If ($numPass -lt 33){ exit 2 }
          If ($numFail -eq 0){ exit 0 }

@@ -775,11 +775,11 @@ jobs:
        run: |
          hostname
          VBoxManage --version
-          sysctl hw.physicalcpu hw.logicalcpu
+          sysctl hw.physicalcpu hw.logicalcpu
      - name: Disable firewall
        run: |
          sudo /usr/libexec/ApplicationFirewall/socketfilterfw --setglobalstate off
-          sudo /usr/libexec/ApplicationFirewall/socketfilterfw -k
+          sudo /usr/libexec/ApplicationFirewall/socketfilterfw -k
      - name: Download Binaries
        uses: actions/download-artifact@v1
        with:

@@ -966,7 +966,7 @@ jobs:
        run: |
          hostname
          VBoxManage --version
-          sysctl hw.physicalcpu hw.logicalcpu
+          sysctl hw.physicalcpu hw.logicalcpu
      - name: Disable firewall
        run: |
          sudo /usr/libexec/ApplicationFirewall/socketfilterfw --setglobalstate off

@@ -1150,7 +1150,7 @@ jobs:
        run: |
          hostname
          VBoxManage --version
-          sysctl hw.physicalcpu hw.logicalcpu
+          sysctl hw.physicalcpu hw.logicalcpu
      - name: Disable firewall
        run: |
          sudo /usr/libexec/ApplicationFirewall/socketfilterfw --setglobalstate off

@@ -1247,4 +1247,4 @@ jobs:
      - uses: actions/upload-artifact@v1
        with:
          name: all_reports
-          path: all_reports
+          path: all_reports
@@ -410,7 +410,7 @@ jobs:
          echo "---------------- ${numFail} Failures :( ----------------------------"
          echo $failedTests
          echo "-------------------------------------------------------"
-          If ($numFail -gt 0){ exit 2 }
+          If ($numFail -gt 0){ exit 2 }
          If ($numPass -eq 0){ exit 2 }
          If ($numPass -lt 33){ exit 2 }
          If ($numFail -eq 0){ exit 0 }

@@ -427,7 +427,7 @@ jobs:
        shell: powershell
        run: |
          echo $env:computerName
-          ls
+          ls
          $ErrorActionPreference = "SilentlyContinue"
          cd minikube_binaries
          ls

@@ -462,14 +462,14 @@ jobs:
            $docker_running = $?
          }
          Write-Output "Docker is running"
-          docker system prune -f
+          docker system prune -f
      - name: Info
        continue-on-error: true
        shell: powershell
        run: |
          $ErrorActionPreference = "SilentlyContinue"
          cd minikube_binaries
-          ls
+          ls
          echo $env:computername
          Get-WmiObject -class Win32_ComputerSystem
      - uses: actions/setup-go@v2

@@ -481,7 +481,7 @@ jobs:
        shell: powershell
        run: |
          $ErrorActionPreference = "SilentlyContinue"
-          (New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.2.4/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
+          (New-Object Net.WebClient).DownloadFile("https://github.com/medyagh/gopogh/releases/download/v0.2.4/gopogh.exe", "C:\ProgramData\chocolatey\bin\gopogh.exe")
          choco install -y kubernetes-cli
          choco install -y jq
          choco install -y caffeine

@@ -547,7 +547,7 @@ jobs:
          echo "---------------- ${numFail} Failures :( ----------------------------"
          echo $failedTests
          echo "-------------------------------------------------------"
-          If ($numFail -gt 0){ exit 2 }
+          If ($numFail -gt 0){ exit 2 }
          If ($numPass -eq 0){ exit 2 }
          If ($numPass -lt 33){ exit 2 }
          If ($numFail -eq 0){ exit 0 }

@@ -773,11 +773,11 @@ jobs:
        run: |
          hostname
          VBoxManage --version
-          sysctl hw.physicalcpu hw.logicalcpu
+          sysctl hw.physicalcpu hw.logicalcpu
      - name: Disable firewall
        run: |
          sudo /usr/libexec/ApplicationFirewall/socketfilterfw --setglobalstate off
-          sudo /usr/libexec/ApplicationFirewall/socketfilterfw -k
+          sudo /usr/libexec/ApplicationFirewall/socketfilterfw -k
      - name: Download Binaries
        uses: actions/download-artifact@v1
        with:

@@ -964,7 +964,7 @@ jobs:
        run: |
          hostname
          VBoxManage --version
-          sysctl hw.physicalcpu hw.logicalcpu
+          sysctl hw.physicalcpu hw.logicalcpu
      - name: Disable firewall
        run: |
          sudo /usr/libexec/ApplicationFirewall/socketfilterfw --setglobalstate off

@@ -1148,7 +1148,7 @@ jobs:
        run: |
          hostname
          VBoxManage --version
-          sysctl hw.physicalcpu hw.logicalcpu
+          sysctl hw.physicalcpu hw.logicalcpu
      - name: Disable firewall
        run: |
          sudo /usr/libexec/ApplicationFirewall/socketfilterfw --setglobalstate off

@@ -1245,4 +1245,4 @@ jobs:
      - uses: actions/upload-artifact@v1
        with:
          name: all_reports
-          path: all_reports
+          path: all_reports
CHANGELOG.md (67 changed lines)
@@ -1,5 +1,72 @@
 # Release Notes

+## Version 1.14.0 - 2020-10-08
+
+## Features
+
+* Delete context when stopped [#9414](https://github.com/kubernetes/minikube/pull/9414)
+* New flag "--ports" to expose ports for docker & podman drivers [#9404](https://github.com/kubernetes/minikube/pull/9404)
+
+## Bug Fixes and minor improvements
+
+* Ingress addon: fix the controller name [#9413](https://github.com/kubernetes/minikube/pull/9413)
+* docker/podman drivers: no panic when updating mount-string with no configuration [#9412](https://github.com/kubernetes/minikube/pull/9412)
+* Improve solution message when there is no space left on device [#9316](https://github.com/kubernetes/minikube/pull/9316)
+
+* To see more changes checkout the last beta release notes [1.14.0-beta.0](https://github.com/kubernetes/minikube/releases/tag/v1.14.0-beta.0).
+
+Thank you to our contributors for this release.
+
+- Anders F Björklund
+- Asare Worae
+- Medya Ghazizadeh
+- Prajilesh N
+- Predrag Rogic
+- Priya Wadhwa
+- Thomas Strömberg
+- ToonvanStrijp
+
+## Version 1.14.0-beta.0 - 2020-10-06
+
+## Features
+
+* add dedicated network for docker driver [#9294](https://github.com/kubernetes/minikube/pull/9294)
+* Make sure gcp-auth addon can be enabled on startup [#9318](https://github.com/kubernetes/minikube/pull/9318)
+
+## Bug Fixes
+
+* Fix minikube status bug when cluster is paused [#9383](https://github.com/kubernetes/minikube/pull/9383)
+* don't allow profile name to be less than 2 characters [#9367](https://github.com/kubernetes/minikube/pull/9367)
+* fix: "profile list" shows paused clusters as "Running" [#8978](https://github.com/kubernetes/minikube/pull/8978)
+* Fix error in unittest, as pointed out by warning [#9345](https://github.com/kubernetes/minikube/pull/9345)
+
+## Updates
+
+* update kicbase image to ubuntu-based [#9353](https://github.com/kubernetes/minikube/pull/9353)
+
+Thank you to our contributors for this release!
+
+- Anders F Björklund
+- Bob Killen
+- Daniel Weibel
+- Dominik Braun
+- Ilya Zuyev
+- JJ Asghar
+- Jituri, Pranav
+- Medya Ghazizadeh
+- Michael Ryan Dempsey
+- Predrag Rogic
+- Priya Wadhwa
+- Sharif Elgamal
+- Tacio Costa
+- Thomas Strömberg
+- Till Hoffmann
+- loftkun
+- programistka
+- zhanwang
+
+
 ## Version 1.13.1 - 2020-09-18
 * Update Default Kubernetes Version to v1.19.2 [#9265](https://github.com/kubernetes/minikube/pull/9265)
 * fix mounting for docker driver in windows [#9263](https://github.com/kubernetes/minikube/pull/9263)
Makefile (53 changed lines)
@@ -14,8 +14,8 @@

 # Bump these on release - and please check ISO_VERSION for correctness.
 VERSION_MAJOR ?= 1
-VERSION_MINOR ?= 13
-VERSION_BUILD ?= 1
+VERSION_MINOR ?= 14
+VERSION_BUILD ?= 0
 RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)
 VERSION ?= v$(RAW_VERSION)

@@ -23,7 +23,7 @@ KUBERNETES_VERSION ?= $(shell egrep "DefaultKubernetesVersion =" pkg/minikube/co
 KIC_VERSION ?= $(shell egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f2)

 # Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions
-ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).1
+ISO_VERSION ?= v1.14.0
 # Dashes are valid in semver, but not Linux packaging. Use ~ to delimit alpha/beta
 DEB_VERSION ?= $(subst -,~,$(RAW_VERSION))
 RPM_VERSION ?= $(DEB_VERSION)

@@ -32,7 +32,7 @@ RPM_VERSION ?= $(DEB_VERSION)
 GO_VERSION ?= 1.14.6

 INSTALL_SIZE ?= $(shell du out/minikube-windows-amd64.exe | cut -f1)
-BUILDROOT_BRANCH ?= 2019.02.11
+BUILDROOT_BRANCH ?= 2020.02.6
 REGISTRY?=gcr.io/k8s-minikube
 REGISTRY_GH?=docker.pkg.github.com/kubernetes/minikube

@@ -58,7 +58,7 @@ MINIKUBE_BUCKET ?= minikube/releases
 MINIKUBE_UPLOAD_LOCATION := gs://${MINIKUBE_BUCKET}
 MINIKUBE_RELEASES_URL=https://github.com/kubernetes/minikube/releases/download

-KERNEL_VERSION ?= 4.19.107
+KERNEL_VERSION ?= 4.19.114
 # latest from https://github.com/golangci/golangci-lint/releases
 GOLINT_VERSION ?= v1.30.0
 # Limit number of default jobs, to avoid the CI builds running out of memory

@@ -94,12 +94,8 @@ GVISOR_TAG ?= latest
 # storage provisioner tag to push changes to
 STORAGE_PROVISIONER_TAG ?= v3

-# TODO: multi-arch manifest
-ifeq ($(GOARCH),amd64)
-STORAGE_PROVISIONER_IMAGE ?= $(REGISTRY)/storage-provisioner:$(STORAGE_PROVISIONER_TAG)
-else
+STORAGE_PROVISIONER_MANIFEST ?= $(REGISTRY)/storage-provisioner:$(STORAGE_PROVISIONER_TAG)
 STORAGE_PROVISIONER_IMAGE ?= $(REGISTRY)/storage-provisioner-$(GOARCH):$(STORAGE_PROVISIONER_TAG)
-endif

 # Set the version information for the Kubernetes servers
 MINIKUBE_LDFLAGS := -X k8s.io/minikube/pkg/version.version=$(VERSION) -X k8s.io/minikube/pkg/version.isoVersion=$(ISO_VERSION) -X k8s.io/minikube/pkg/version.isoPath=$(ISO_BUCKET) -X k8s.io/minikube/pkg/version.gitCommitID=$(COMMIT) -X k8s.io/minikube/pkg/version.storageProvisionerVersion=$(STORAGE_PROVISIONER_TAG)

@@ -305,9 +301,15 @@ html_report: ## Generate HTML report out of the last ran integration test logs.
 	@go tool test2json -t < "./out/testout_$(COMMIT_SHORT).txt" > "./out/testout_$(COMMIT_SHORT).json"
 	@gopogh -in "./out/testout_$(COMMIT_SHORT).json" -out ./out/testout_$(COMMIT_SHORT).html -name "$(shell git rev-parse --abbrev-ref HEAD)" -pr "" -repo github.com/kubernetes/minikube/ -details "${COMMIT_SHORT}"
 	@echo "-------------------------- Open HTML Report in Browser: ---------------------------"
+ifeq ($(GOOS),windows)
+	@echo start $(CURDIR)/out/testout_$(COMMIT_SHORT).html
+	@echo "-----------------------------------------------------------------------------------"
+	@start $(CURDIR)/out/testout_$(COMMIT_SHORT).html || true
+else
 	@echo open $(CURDIR)/out/testout_$(COMMIT_SHORT).html
 	@echo "-----------------------------------------------------------------------------------"
 	@open $(CURDIR)/out/testout_$(COMMIT_SHORT).html || true
+endif

 .PHONY: test
 test: pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go ## Trigger minikube test

@@ -331,7 +333,7 @@ pkg/minikube/assets/assets.go: $(shell find "deploy/addons" -type f)
 ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
 	$(call DOCKER,$(BUILD_IMAGE),/usr/bin/make $@)
 endif
-	@which go-bindata >/dev/null 2>&1 || GO111MODULE=off GOBIN="$(GOPATH)$(DIRSEP)bin" go get github.com/jteeuwen/go-bindata/...
+	@which go-bindata >/dev/null 2>&1 || GO111MODULE=off GOBIN="$(GOPATH)$(DIRSEP)bin" go get github.com/go-bindata/go-bindata/...
 	$(if $(quiet),@echo "  GEN  $@")
 	$(Q)PATH="$(PATH)$(PATHSEP)$(GOPATH)$(DIRSEP)bin" go-bindata -nomemcopy -o $@ -pkg assets deploy/addons/...
 	$(Q)-gofmt -s -w $@

@@ -344,7 +346,7 @@ pkg/minikube/translate/translations.go: $(shell find "translations/" -type f)
 ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
 	$(call DOCKER,$(BUILD_IMAGE),/usr/bin/make $@)
 endif
-	@which go-bindata >/dev/null 2>&1 || GO111MODULE=off GOBIN="$(GOPATH)$(DIRSEP)bin" go get github.com/jteeuwen/go-bindata/...
+	@which go-bindata >/dev/null 2>&1 || GO111MODULE=off GOBIN="$(GOPATH)$(DIRSEP)bin" go get github.com/go-bindata/go-bindata/...
 	$(if $(quiet),@echo "  GEN  $@")
 	$(Q)PATH="$(PATH)$(PATHSEP)$(GOPATH)$(DIRSEP)bin" go-bindata -nomemcopy -o $@ -pkg translate translations/...
 	$(Q)-gofmt -s -w $@

@@ -573,13 +575,16 @@ else
 endif

 .PHONY: storage-provisioner-image
-storage-provisioner-image: out/storage-provisioner-$(GOARCH) ## Build storage-provisioner docker image
-	docker build -t $(STORAGE_PROVISIONER_IMAGE) -f deploy/storage-provisioner/Dockerfile --build-arg arch=$(GOARCH) .
+storage-provisioner-image: storage-provisioner-image-$(GOARCH) ## Build storage-provisioner docker image
+	docker tag $(REGISTRY)/storage-provisioner-$(GOARCH):$(STORAGE_PROVISIONER_TAG) $(REGISTRY)/storage-provisioner:$(STORAGE_PROVISIONER_TAG)
+
+storage-provisioner-image-%: out/storage-provisioner-%
+	docker build -t $(REGISTRY)/storage-provisioner-$*:$(STORAGE_PROVISIONER_TAG) -f deploy/storage-provisioner/Dockerfile --build-arg arch=$* .

 .PHONY: kic-base-image
 kic-base-image: ## builds the base image used for kic.
 	docker rmi -f $(KIC_BASE_IMAGE_GCR)-snapshot || true
-	docker build -f ./deploy/kicbase/Dockerfile -t local/kicbase:$(KIC_VERSION)-snapshot --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) --cache-from $(KIC_BASE_IMAGE_GCR) --target base ./deploy/kicbase
+	docker build -f ./deploy/kicbase/Dockerfile -t local/kicbase:$(KIC_VERSION)-snapshot --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) --cache-from $(KIC_BASE_IMAGE_GCR) ./deploy/kicbase
 	docker tag local/kicbase:$(KIC_VERSION)-snapshot $(KIC_BASE_IMAGE_GCR)-snapshot
 	docker tag local/kicbase:$(KIC_VERSION)-snapshot $(KIC_BASE_IMAGE_GCR)
 	docker tag local/kicbase:$(KIC_VERSION)-snapshot $(KIC_BASE_IMAGE_HUB)

@@ -595,6 +600,18 @@ push-storage-provisioner-image: storage-provisioner-image ## Push storage-provis
 	docker login gcr.io/k8s-minikube
 	$(MAKE) push-docker IMAGE=$(STORAGE_PROVISIONER_IMAGE)

+ALL_ARCH = amd64 arm arm64 ppc64le s390x
+IMAGE = $(REGISTRY)/storage-provisioner
+TAG = $(STORAGE_PROVISIONER_TAG)
+
+.PHONY: push-storage-provisioner-manifest
+push-storage-provisioner-manifest: $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~storage\-provisioner\-image\-&~g")
+	docker login gcr.io/k8s-minikube
+	set -x; for arch in $(ALL_ARCH); do docker push ${IMAGE}-$${arch}:${TAG}; done
+	docker manifest create --amend $(IMAGE):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(IMAGE)\-&:$(TAG)~g")
+	set -x; for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${IMAGE}:${TAG} ${IMAGE}-$${arch}:${TAG}; done
+	docker manifest push $(STORAGE_PROVISIONER_MANIFEST)
+
 .PHONY: push-docker
 push-docker: # Push docker image base on to IMAGE variable
 	@docker pull $(IMAGE) && echo "Image already exist in registry" && exit 1 || echo "Image doesn't exist in registry"

@@ -747,9 +764,9 @@ site: site/themes/docsy/assets/vendor/bootstrap/package.js out/hugo/hugo ## Serv
 out/mkcmp:
 	GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o $@ cmd/performance/mkcmp/main.go

-.PHONY: out/performance-monitor
-out/performance-monitor:
-	GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o $@ cmd/performance/monitor/monitor.go
+.PHONY: out/performance-bot
+out/performance-bot:
+	GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o $@ cmd/performance/pr-bot/bot.go

 .PHONY: compare
 compare: out/mkcmp out/minikube
@@ -28,8 +28,7 @@ import (
 	"k8s.io/minikube/pkg/minikube/reason"
 )

-const longDescription = `
-Outputs minikube shell completion for the given shell (bash, zsh or fish)
+const longDescription = `Outputs minikube shell completion for the given shell (bash, zsh or fish)

 	This depends on the bash-completion binary. Example installation instructions:
 	OS X:
@@ -17,8 +17,9 @@ limitations under the License.
 package config

 import (
-	"github.com/golang/glog"
 	"github.com/spf13/cobra"
+
+	"k8s.io/klog/v2"
 )

 // AddonsCmd represents the addons command

@@ -28,7 +29,7 @@ var AddonsCmd = &cobra.Command{
 	Long:  `addons modifies minikube addons files using subcommands like "minikube addons enable dashboard"`,
 	Run: func(cmd *cobra.Command, args []string) {
 		if err := cmd.Help(); err != nil {
-			glog.Errorf("help: %v", err)
+			klog.Errorf("help: %v", err)
 		}
 	},
 }
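Note: most of the Go hunks in this commit are a mechanical migration from github.com/golang/glog to k8s.io/klog/v2. As a minimal standalone sketch (illustration only, not minikube code), the swap is mostly the import line, because klog keeps glog's call shapes; the one visible difference is that flag registration becomes explicit:

```go
// Sketch of the glog -> klog/v2 migration pattern these hunks repeat.
package main

import (
	"k8s.io/klog/v2" // was: "github.com/golang/glog"
)

func main() {
	klog.InitFlags(nil) // registers -v, -logtostderr, etc.; glog did this in init()
	defer klog.Flush()  // flush buffered log I/O before exit

	klog.Infof("help: %v", nil)      // call sites keep the same shape as glog
	klog.Warningf("example warning") // only the package identifier changed
}
```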
@@ -23,9 +23,9 @@ import (
 	"sort"
 	"strings"

-	"github.com/golang/glog"
 	"github.com/olekukonko/tablewriter"
 	"github.com/spf13/cobra"
+	"k8s.io/klog/v2"
 	"k8s.io/minikube/pkg/minikube/assets"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/exit"

@@ -114,7 +114,7 @@ var printAddonsList = func(cc *config.ClusterConfig) {

 	v, _, err := config.ListProfiles()
 	if err != nil {
-		glog.Errorf("list profiles returned error: %v", err)
+		klog.Errorf("list profiles returned error: %v", err)
 	}
 	if len(v) > 1 {
 		out.T(style.Tip, "To see addons list for other profiles use: `minikube addons -p name list`")
@@ -19,8 +19,8 @@ package config
 import (
 	"strings"

-	"github.com/golang/glog"
 	"github.com/spf13/cobra"
+	"k8s.io/klog/v2"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/driver"
 	"k8s.io/minikube/pkg/minikube/localpath"

@@ -185,7 +185,7 @@ var ConfigCmd = &cobra.Command{
 Configurable fields: ` + "\n\n" + configurableFields(),
 	Run: func(cmd *cobra.Command, args []string) {
 		if err := cmd.Help(); err != nil {
-			glog.Errorf("help: %v", err)
+			klog.Errorf("help: %v", err)
 		}
 	},
 }
@@ -32,8 +32,8 @@ import (

 var addonsConfigureCmd = &cobra.Command{
 	Use:   "configure ADDON_NAME",
-	Short: "Configures the addon w/ADDON_NAME within minikube (example: minikube addons configure registry-creds). For a list of available addons use: minikube addons list ",
-	Long:  "Configures the addon w/ADDON_NAME within minikube (example: minikube addons configure registry-creds). For a list of available addons use: minikube addons list ",
+	Short: "Configures the addon w/ADDON_NAME within minikube (example: minikube addons configure registry-creds). For a list of available addons use: minikube addons list",
+	Long:  "Configures the addon w/ADDON_NAME within minikube (example: minikube addons configure registry-creds). For a list of available addons use: minikube addons list",
 	Run: func(cmd *cobra.Command, args []string) {
 		if len(args) != 1 {
 			exit.Message(reason.Usage, "usage: minikube addons configure ADDON_NAME")
@@ -23,6 +23,7 @@ import (
 	"strconv"
 	"strings"

+	"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/driver"
 	"k8s.io/minikube/pkg/minikube/exit"

@@ -31,9 +32,11 @@ import (
 	"k8s.io/minikube/pkg/minikube/reason"
 	"k8s.io/minikube/pkg/minikube/style"

-	"github.com/golang/glog"
+	"github.com/docker/machine/libmachine"
 	"github.com/olekukonko/tablewriter"
 	"github.com/spf13/cobra"
+
+	"k8s.io/klog/v2"
 )

 var output string

@@ -54,96 +57,114 @@ var profileListCmd = &cobra.Command{
 	},
 }

-var printProfilesTable = func() {
-	var validData [][]string
+func printProfilesTable() {
+	validProfiles, invalidProfiles, err := config.ListProfiles()
+
+	if err != nil {
+		klog.Warningf("error loading profiles: %v", err)
+	}
+
+	if len(validProfiles) == 0 {
+		exit.Message(reason.Usage, "No minikube profile was found. You can create one using `minikube start`.")
+	}
+
+	updateProfilesStatus(validProfiles)
+	renderProfilesTable(profilesToTableData(validProfiles))
+	warnInvalidProfiles(invalidProfiles)
+}
+
+func updateProfilesStatus(profiles []*config.Profile) {
+	api, err := machine.NewAPIClient()
+	if err != nil {
+		klog.Errorf("failed to get machine api client %v", err)
+	}
+	defer api.Close()
+
+	for _, p := range profiles {
+		p.Status = profileStatus(p, api)
+	}
+}
+
+func profileStatus(p *config.Profile, api libmachine.API) string {
+	cp, err := config.PrimaryControlPlane(p.Config)
+	if err != nil {
+		exit.Error(reason.GuestCpConfig, "error getting primary control plane", err)
+	}
+
+	host, err := machine.LoadHost(api, driver.MachineName(*p.Config, cp))
+	if err != nil {
+		klog.Warningf("error loading profiles: %v", err)
+		return "Unknown"
+	}
+
+	cr, err := machine.CommandRunner(host)
+	if err != nil {
+		klog.Warningf("error loading profiles: %v", err)
+		return "Unknown"
+	}
+
+	hostname, _, port, err := driver.ControlPlaneEndpoint(p.Config, &cp, host.DriverName)
+	if err != nil {
+		klog.Warningf("error loading profiles: %v", err)
+		return "Unknown"
+	}
+
+	status, err := kverify.APIServerStatus(cr, hostname, port)
+	if err != nil {
+		klog.Warningf("error getting apiserver status for %s: %v", p.Name, err)
+		return "Unknown"
+	}
+	return status.String()
+}
+
+func renderProfilesTable(ps [][]string) {
 	table := tablewriter.NewWriter(os.Stdout)
 	table.SetHeader([]string{"Profile", "VM Driver", "Runtime", "IP", "Port", "Version", "Status"})
 	table.SetAutoFormatHeaders(false)
 	table.SetBorders(tablewriter.Border{Left: true, Top: true, Right: true, Bottom: true})
 	table.SetCenterSeparator("|")
-	validProfiles, invalidProfiles, err := config.ListProfiles()
+	table.AppendBulk(ps)
+	table.Render()
+}

-	if len(validProfiles) == 0 || err != nil {
-		exit.Message(reason.Usage, "No minikube profile was found. You can create one using `minikube start`.")
-	}
-	api, err := machine.NewAPIClient()
-	if err != nil {
-		glog.Errorf("failed to get machine api client %v", err)
-	}
-	defer api.Close()
-
-	for _, p := range validProfiles {
+func profilesToTableData(profiles []*config.Profile) [][]string {
+	var data [][]string
+	for _, p := range profiles {
 		cp, err := config.PrimaryControlPlane(p.Config)
 		if err != nil {
 			exit.Error(reason.GuestCpConfig, "error getting primary control plane", err)
 		}
-		p.Status, err = machine.Status(api, driver.MachineName(*p.Config, cp))
-		if err != nil {
-			glog.Warningf("error getting host status for %s: %v", p.Name, err)
-		}
-		validData = append(validData, []string{p.Name, p.Config.Driver, p.Config.KubernetesConfig.ContainerRuntime, cp.IP, strconv.Itoa(cp.Port), p.Config.KubernetesConfig.KubernetesVersion, p.Status})
+
+		data = append(data, []string{p.Name, p.Config.Driver, p.Config.KubernetesConfig.ContainerRuntime, cp.IP, strconv.Itoa(cp.Port), p.Config.KubernetesConfig.KubernetesVersion, p.Status})
 	}
+	return data
+}

-	table.AppendBulk(validData)
-	table.Render()
-
-	if invalidProfiles != nil {
-		out.WarningT("Found {{.number}} invalid profile(s) ! ", out.V{"number": len(invalidProfiles)})
-		for _, p := range invalidProfiles {
-			out.ErrT(style.Empty, "\t "+p.Name)
-		}
-		out.ErrT(style.Tip, "You can delete them using the following command(s): ")
-		for _, p := range invalidProfiles {
-			out.Err(fmt.Sprintf("\t $ minikube delete -p %s \n", p.Name))
-		}
+func warnInvalidProfiles(invalidProfiles []*config.Profile) {
+	if invalidProfiles == nil {
+		return
 	}

-	if err != nil {
-		glog.Warningf("error loading profiles: %v", err)
+	out.WarningT("Found {{.number}} invalid profile(s) ! ", out.V{"number": len(invalidProfiles)})
+	for _, p := range invalidProfiles {
+		out.ErrT(style.Empty, "\t "+p.Name)
+	}
+
+	out.ErrT(style.Tip, "You can delete them using the following command(s): ")
+	for _, p := range invalidProfiles {
+		out.Err(fmt.Sprintf("\t $ minikube delete -p %s \n", p.Name))
 	}
 }

-var printProfilesJSON = func() {
-	api, err := machine.NewAPIClient()
-	if err != nil {
-		glog.Errorf("failed to get machine api client %v", err)
-	}
-	defer api.Close()
-
+func printProfilesJSON() {
 	validProfiles, invalidProfiles, err := config.ListProfiles()
-	for _, v := range validProfiles {
-		cp, err := config.PrimaryControlPlane(v.Config)
-		if err != nil {
-			exit.Error(reason.GuestCpConfig, "error getting primary control plane", err)
-		}
-		status, err := machine.Status(api, driver.MachineName(*v.Config, cp))
-		if err != nil {
-			glog.Warningf("error getting host status for %s: %v", v.Name, err)
-		}
-		v.Status = status
-	}

-	var valid []*config.Profile
-	var invalid []*config.Profile
-
-	if validProfiles != nil {
-		valid = validProfiles
-	} else {
-		valid = []*config.Profile{}
-	}
-
-	if invalidProfiles != nil {
-		invalid = invalidProfiles
-	} else {
-		invalid = []*config.Profile{}
-	}
-
-	body := map[string]interface{}{}
+	updateProfilesStatus(validProfiles)

+	var body = map[string]interface{}{}
 	if err == nil || config.IsNotExist(err) {
-		body["valid"] = valid
-		body["invalid"] = invalid
+		body["valid"] = profilesOrDefault(validProfiles)
+		body["invalid"] = profilesOrDefault(invalidProfiles)
 		jsonString, _ := json.Marshal(body)
 		out.String(string(jsonString))
 	} else {

@@ -154,6 +175,13 @@ var printProfilesJSON = func() {
 	}
 }

+func profilesOrDefault(profiles []*config.Profile) []*config.Profile {
+	if profiles != nil {
+		return profiles
+	}
+	return []*config.Profile{}
+}
+
 func init() {
 	profileListCmd.Flags().StringVarP(&output, "output", "o", "table", "The output format. One of 'json', 'table'")
 	ProfileCmd.AddCommand(profileListCmd)
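Note: the profilesOrDefault helper added above exists because a nil Go slice marshals to JSON null, while an empty slice marshals to []. A self-contained illustration (hypothetical trimmed-down Profile type, not minikube's):

```go
// Demonstrates the nil-vs-empty slice distinction in encoding/json that
// motivates profilesOrDefault: consumers of "profile list -o json" get []
// instead of null when there are no profiles.
package main

import (
	"encoding/json"
	"fmt"
)

type Profile struct{ Name string }

func profilesOrDefault(profiles []*Profile) []*Profile {
	if profiles != nil {
		return profiles
	}
	return []*Profile{}
}

func main() {
	var missing []*Profile // nil slice
	a, _ := json.Marshal(map[string]interface{}{"invalid": missing})
	b, _ := json.Marshal(map[string]interface{}{"invalid": profilesOrDefault(missing)})
	fmt.Println(string(a)) // {"invalid":null}
	fmt.Println(string(b)) // {"invalid":[]}
}
```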
@@ -23,8 +23,8 @@ import (
 	"os"
 	"strings"

-	"github.com/golang/glog"
 	"golang.org/x/crypto/ssh/terminal"
+	"k8s.io/klog/v2"
 	"k8s.io/minikube/pkg/minikube/out"
 )

@@ -127,7 +127,7 @@ func AskForPasswordValue(s string) string {
 	}
 	defer func() {
 		if err := terminal.Restore(stdInFd, oldState); err != nil {
-			glog.Errorf("terminal restore failed: %v", err)
+			klog.Errorf("terminal restore failed: %v", err)
 		}
 	}()
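Note: the hunk above only touches the logging call, but the surrounding pattern is worth a sketch: terminal state is restored in a defer, and a failed restore can only be logged. A rough, simplified version under the same x/crypto/ssh/terminal package (the real AskForPasswordValue handles more error paths):

```go
// Sketch of the defer-restore pattern around a raw terminal password read.
package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh/terminal"
	"k8s.io/klog/v2"
)

func askForPassword(prompt string) string {
	fmt.Print(prompt)
	fd := int(os.Stdin.Fd())

	state, err := terminal.GetState(fd)
	if err != nil {
		klog.Errorf("terminal state: %v", err)
		return ""
	}
	defer func() {
		// Restore whatever mode the terminal was in, even on error paths.
		if err := terminal.Restore(fd, state); err != nil {
			klog.Errorf("terminal restore failed: %v", err) // matches the diff's log line
		}
	}()

	b, err := terminal.ReadPassword(fd) // suppresses echo while reading
	if err != nil {
		klog.Errorf("read password: %v", err)
		return ""
	}
	fmt.Println()
	return string(b)
}

func main() {
	_ = askForPassword("password: ")
}
```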
@@ -26,9 +26,9 @@ import (
 	"regexp"
 	"time"

-	"github.com/golang/glog"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
+	"k8s.io/klog/v2"
 	"k8s.io/minikube/pkg/addons"
 	"k8s.io/minikube/pkg/minikube/assets"
 	"k8s.io/minikube/pkg/minikube/style"

@@ -61,7 +61,7 @@ var dashboardCmd = &cobra.Command{

 		for _, n := range co.Config.Nodes {
 			if err := proxy.ExcludeIP(n.IP); err != nil {
-				glog.Errorf("Error excluding IP from proxy: %s", err)
+				klog.Errorf("Error excluding IP from proxy: %s", err)
 			}
 		}

@@ -118,9 +118,9 @@ var dashboardCmd = &cobra.Command{
 			}
 		}

-		glog.Infof("Success! I will now quietly sit around until kubectl proxy exits!")
+		klog.Infof("Success! I will now quietly sit around until kubectl proxy exits!")
 		if err = p.Wait(); err != nil {
-			glog.Errorf("Wait: %v", err)
+			klog.Errorf("Wait: %v", err)
 		}
 	},
 }

@@ -143,12 +143,12 @@ func kubectlProxy(kubectlVersion string, contextName string) (*exec.Cmd, string,
 		return nil, "", errors.Wrap(err, "cmd stdout")
 	}

-	glog.Infof("Executing: %s %s", cmd.Path, cmd.Args)
+	klog.Infof("Executing: %s %s", cmd.Path, cmd.Args)
 	if err := cmd.Start(); err != nil {
 		return nil, "", errors.Wrap(err, "proxy start")
 	}

-	glog.Infof("Waiting for kubectl to output host:port ...")
+	klog.Infof("Waiting for kubectl to output host:port ...")
 	reader := bufio.NewReader(stdoutPipe)

 	var out []byte

@@ -161,12 +161,12 @@ func kubectlProxy(kubectlVersion string, contextName string) (*exec.Cmd, string,
 			break
 		}
 		if timedOut {
-			glog.Infof("timed out waiting for input: possibly due to an old kubectl version.")
+			klog.Infof("timed out waiting for input: possibly due to an old kubectl version.")
 			break
 		}
 		out = append(out, r)
 	}
-	glog.Infof("proxy stdout: %s", string(out))
+	klog.Infof("proxy stdout: %s", string(out))
 	return cmd, hostPortRe.FindString(string(out)), nil
 }

@@ -203,7 +203,7 @@ func dashboardURL(proxy string, ns string, svc string) string {
 // checkURL checks if a URL returns 200 HTTP OK
 func checkURL(url string) error {
 	resp, err := http.Get(url)
-	glog.Infof("%s response: %v %+v", url, err, resp)
+	klog.Infof("%s response: %v %+v", url, err, resp)
 	if err != nil {
 		return errors.Wrap(err, "checkURL")
 	}
@@ -25,13 +25,13 @@ import (
 	"strconv"

 	"github.com/docker/machine/libmachine/mcnerror"
-	"github.com/golang/glog"
 	"github.com/mitchellh/go-ps"
 	"github.com/pkg/errors"

 	"github.com/docker/machine/libmachine"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
+	"k8s.io/klog/v2"
 	cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
 	"k8s.io/minikube/pkg/drivers/kic/oci"
 	"k8s.io/minikube/pkg/minikube/cluster"

@@ -91,27 +91,26 @@ func init() {
 	if err := viper.BindPFlags(deleteCmd.Flags()); err != nil {
 		exit.Error(reason.InternalBindFlags, "unable to bind flags", err)
 	}
 	RootCmd.AddCommand(deleteCmd)
 }

 // shotgun cleanup to delete orphaned docker container data
 func deleteContainersAndVolumes(ociBin string) {
 	if _, err := exec.LookPath(ociBin); err != nil {
-		glog.Infof("skipping deleteContainersAndVolumes for %s: %v", ociBin, err)
+		klog.Infof("skipping deleteContainersAndVolumes for %s: %v", ociBin, err)
 		return
 	}

-	glog.Infof("deleting containers and volumes ...")
+	klog.Infof("deleting containers and volumes ...")

 	delLabel := fmt.Sprintf("%s=%s", oci.CreatedByLabelKey, "true")
 	errs := oci.DeleteContainersByLabel(ociBin, delLabel)
 	if len(errs) > 0 { // it will error if there is no container to delete
-		glog.Infof("error delete containers by label %q (might be okay): %+v", delLabel, errs)
+		klog.Infof("error delete containers by label %q (might be okay): %+v", delLabel, errs)
 	}

 	errs = oci.DeleteAllVolumesByLabel(ociBin, delLabel)
 	if len(errs) > 0 { // it will not error if there is nothing to delete
-		glog.Warningf("error delete volumes by label %q (might be okay): %+v", delLabel, errs)
+		klog.Warningf("error delete volumes by label %q (might be okay): %+v", delLabel, errs)
 	}

 	if ociBin == oci.Podman {

@@ -121,7 +120,7 @@ func deleteContainersAndVolumes(ociBin string) {

 	errs = oci.PruneAllVolumesByLabel(ociBin, delLabel)
 	if len(errs) > 0 { // it will not error if there is nothing to delete
-		glog.Warningf("error pruning volumes by label %q (might be okay): %+v", delLabel, errs)
+		klog.Warningf("error pruning volumes by label %q (might be okay): %+v", delLabel, errs)
 	}
 }

@@ -135,7 +134,7 @@ func runDelete(cmd *cobra.Command, args []string) {

 	validProfiles, invalidProfiles, err := config.ListProfiles()
 	if err != nil {
-		glog.Warningf("'error loading profiles in minikube home %q: %v", localpath.MiniPath(), err)
+		klog.Warningf("'error loading profiles in minikube home %q: %v", localpath.MiniPath(), err)
 	}
 	profilesToDelete := append(validProfiles, invalidProfiles...)
 	// in the case user has more than 1 profile and runs --purge

@@ -195,7 +194,7 @@ func runDelete(cmd *cobra.Command, args []string) {
 }

 func purgeMinikubeDirectory() {
-	glog.Infof("Purging the '.minikube' directory located at %s", localpath.MiniPath())
+	klog.Infof("Purging the '.minikube' directory located at %s", localpath.MiniPath())
 	if err := os.RemoveAll(localpath.MiniPath()); err != nil {
 		exit.Error(reason.HostPurge, "unable to delete minikube config folder", err)
 	}

@@ -204,7 +203,7 @@ func purgeMinikubeDirectory() {

 // DeleteProfiles deletes one or more profiles
 func DeleteProfiles(profiles []*config.Profile) []error {
-	glog.Infof("DeleteProfiles")
+	klog.Infof("DeleteProfiles")
 	var errs []error
 	for _, profile := range profiles {
 		err := deleteProfile(profile)

@@ -237,11 +236,11 @@ func deletePossibleKicLeftOver(cname string, driverName string) {
 	}

 	if _, err := exec.LookPath(bin); err != nil {
-		glog.Infof("skipping deletePossibleKicLeftOver for %s: %v", bin, err)
+		klog.Infof("skipping deletePossibleKicLeftOver for %s: %v", bin, err)
 		return
 	}

-	glog.Infof("deleting possible KIC leftovers for %s (driver=%s) ...", cname, driverName)
+	klog.Infof("deleting possible KIC leftovers for %s (driver=%s) ...", cname, driverName)

 	delLabel := fmt.Sprintf("%s=%s", oci.ProfileLabelKey, cname)
 	cs, err := oci.ListContainersByLabel(bin, delLabel)

@@ -250,7 +249,7 @@ func deletePossibleKicLeftOver(cname string, driverName string) {
 			out.T(style.DeletingHost, `Deleting container "{{.name}}" ...`, out.V{"name": cname})
 			err := oci.DeleteContainer(bin, c)
 			if err != nil { // it will error if there is no container to delete
-				glog.Errorf("error deleting container %q. You may want to delete it manually :\n%v", cname, err)
+				klog.Errorf("error deleting container %q. You may want to delete it manually :\n%v", cname, err)
 			}
 		}

@@ -258,7 +257,12 @@ func deletePossibleKicLeftOver(cname string, driverName string) {

 	errs := oci.DeleteAllVolumesByLabel(bin, delLabel)
 	if errs != nil { // it will not error if there is nothing to delete
-		glog.Warningf("error deleting volumes (might be okay).\nTo see the list of volumes run: 'docker volume ls'\n:%v", errs)
+		klog.Warningf("error deleting volumes (might be okay).\nTo see the list of volumes run: 'docker volume ls'\n:%v", errs)
 	}

+	errs = oci.DeleteKICNetworks()
+	if errs != nil {
+		klog.Warningf("error deleting leftover networks (might be okay).\nTo see the list of networks: 'docker network ls'\n:%v", errs)
+	}
+
 	if bin == oci.Podman {

@@ -268,17 +272,17 @@ func deletePossibleKicLeftOver(cname string, driverName string) {

 	errs = oci.PruneAllVolumesByLabel(bin, delLabel)
 	if len(errs) > 0 { // it will not error if there is nothing to delete
-		glog.Warningf("error pruning volume (might be okay):\n%v", errs)
+		klog.Warningf("error pruning volume (might be okay):\n%v", errs)
 	}
 }

 func deleteProfile(profile *config.Profile) error {
-	glog.Infof("Deleting %s", profile.Name)
+	klog.Infof("Deleting %s", profile.Name)
 	register.Reg.SetStep(register.Deleting)

 	viper.Set(config.ProfileName, profile.Name)
 	if profile.Config != nil {
-		glog.Infof("%s configuration: %+v", profile.Name, profile.Config)
+		klog.Infof("%s configuration: %+v", profile.Name, profile.Config)

 		// if driver is oci driver, delete containers and volumes
 		if driver.IsKIC(profile.Config.Driver) {

@@ -289,7 +293,7 @@ func deleteProfile(profile *config.Profile) error {
 			}
 		}
 	} else {
-		glog.Infof("%s has no configuration, will try to make it work anyways", profile.Name)
+		klog.Infof("%s has no configuration, will try to make it work anyways", profile.Name)
 	}

 	api, err := machine.NewAPIClient()

@@ -346,7 +350,7 @@ func deleteHosts(api libmachine.API, cc *config.ClusterConfig) {
 		if err := machine.DeleteHost(api, machineName); err != nil {
 			switch errors.Cause(err).(type) {
 			case mcnerror.ErrHostDoesNotExist:
-				glog.Infof("Host %s does not exist. Proceeding ahead with cleanup.", machineName)
+				klog.Infof("Host %s does not exist. Proceeding ahead with cleanup.", machineName)
 			default:
 				out.FailureT("Failed to delete cluster: {{.error}}", out.V{"error": err})
 				out.T(style.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": machineName})

@@ -430,7 +434,7 @@ func uninstallKubernetes(api libmachine.API, cc config.ClusterConfig, n config.N
 	// Unpause the cluster if necessary to avoid hung kubeadm
 	_, err = cluster.Unpause(cr, r, nil)
 	if err != nil {
-		glog.Errorf("unpause failed: %v", err)
+		klog.Errorf("unpause failed: %v", err)
 	}

 	if err = clusterBootstrapper.DeleteCluster(cc.KubernetesConfig); err != nil {

@@ -474,7 +478,7 @@ func handleMultipleDeletionErrors(errors []error) {
 		deletionError, ok := err.(DeletionError)

 		if ok {
-			glog.Errorln(deletionError.Error())
+			klog.Errorln(deletionError.Error())
 		} else {
 			exit.Error(reason.GuestDeletion, "Could not process errors from failed deletion", err)
 		}

@@ -499,12 +503,12 @@ func killMountProcess() error {
 		return nil
 	}

-	glog.Infof("Found %s ...", pidPath)
+	klog.Infof("Found %s ...", pidPath)
 	out, err := ioutil.ReadFile(pidPath)
 	if err != nil {
 		return errors.Wrap(err, "ReadFile")
 	}
-	glog.Infof("pidfile contents: %s", out)
+	klog.Infof("pidfile contents: %s", out)
 	pid, err := strconv.Atoi(string(out))
 	if err != nil {
 		return errors.Wrap(err, "error parsing pid")

@@ -515,7 +519,7 @@ func killMountProcess() error {
 		return errors.Wrap(err, "ps.FindProcess")
 	}
 	if entry == nil {
-		glog.Infof("Stale pid: %d", pid)
+		klog.Infof("Stale pid: %d", pid)
 		if err := os.Remove(pidPath); err != nil {
 			return errors.Wrap(err, "Removing stale pid")
 		}

@@ -523,15 +527,15 @@ func killMountProcess() error {
 	}

 	// We found a process, but it still may not be ours.
-	glog.Infof("Found process %d: %s", pid, entry.Executable())
+	klog.Infof("Found process %d: %s", pid, entry.Executable())
 	proc, err := os.FindProcess(pid)
 	if err != nil {
 		return errors.Wrap(err, "os.FindProcess")
 	}

-	glog.Infof("Killing pid %d ...", pid)
+	klog.Infof("Killing pid %d ...", pid)
 	if err := proc.Kill(); err != nil {
-		glog.Infof("Kill failed with %v - removing probably stale pid...", err)
+		klog.Infof("Kill failed with %v - removing probably stale pid...", err)
 		if err := os.Remove(pidPath); err != nil {
 			return errors.Wrap(err, "Removing likely stale unkillable pid")
 		}
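Note: killMountProcess (last hunks above) follows a common pidfile pattern: read the recorded pid, check whether such a process still exists, and treat a missing process as a stale pidfile. A condensed, hypothetical version of just that check, using the same mitchellh/go-ps dependency the diff imports:

```go
// Sketch of the stale-pidfile check: a nil entry from ps.FindProcess
// means no process with that pid exists, i.e. the pidfile is stale.
package main

import (
	"fmt"
	"os"
	"strconv"

	ps "github.com/mitchellh/go-ps"
)

func pidAlive(pidPath string) (bool, error) {
	out, err := os.ReadFile(pidPath)
	if err != nil {
		return false, err // no pidfile: nothing to kill
	}
	pid, err := strconv.Atoi(string(out))
	if err != nil {
		return false, err // corrupt pidfile
	}
	entry, err := ps.FindProcess(pid)
	if err != nil {
		return false, err
	}
	return entry != nil, nil // nil entry => stale pid
}

func main() {
	alive, err := pidAlive("/tmp/mount.pid") // path is a made-up example
	fmt.Println(alive, err)
}
```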
@@ -28,8 +28,8 @@ import (
 	"strconv"
 	"strings"

-	"github.com/golang/glog"
 	"github.com/spf13/cobra"
+	"k8s.io/klog/v2"
 	"k8s.io/minikube/pkg/drivers/kic/oci"
 	"k8s.io/minikube/pkg/minikube/command"
 	"k8s.io/minikube/pkg/minikube/constants"

@@ -163,7 +163,7 @@ var dockerEnvCmd = &cobra.Command{
 		}

 		if ok := isDockerActive(co.CP.Runner); !ok {
-			glog.Warningf("dockerd is not active will try to restart it...")
+			klog.Warningf("dockerd is not active will try to restart it...")
 			mustRestartDocker(cname, co.CP.Runner)
 		}

@@ -195,7 +195,7 @@ var dockerEnvCmd = &cobra.Command{

 		dockerPath, err := exec.LookPath("docker")
 		if err != nil {
-			glog.Warningf("Unable to find docker in path - skipping connectivity check: %v", err)
+			klog.Warningf("Unable to find docker in path - skipping connectivity check: %v", err)
 			dockerPath = ""
 		}

@@ -203,7 +203,7 @@ var dockerEnvCmd = &cobra.Command{
 			out, err := tryDockerConnectivity("docker", ec)
 			if err != nil { // docker might be up but been loaded with wrong certs/config
 				// to fix issues like this #8185
-				glog.Warningf("couldn't connect to docker inside minikube. will try to restart dockerd service... output: %s error: %v", string(out), err)
+				klog.Warningf("couldn't connect to docker inside minikube. will try to restart dockerd service... output: %s error: %v", string(out), err)
 				mustRestartDocker(cname, co.CP.Runner)
 			}
 		}

@@ -281,7 +281,7 @@ func dockerEnvVarsList(ec DockerEnvConfig) []string {
 func tryDockerConnectivity(bin string, ec DockerEnvConfig) ([]byte, error) {
 	c := exec.Command(bin, "version", "--format={{.Server}}")
 	c.Env = append(os.Environ(), dockerEnvVarsList(ec)...)
-	glog.Infof("Testing Docker connectivity with: %v", c)
+	klog.Infof("Testing Docker connectivity with: %v", c)
 	return c.CombinedOutput()
 }
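Note: tryDockerConnectivity (end of the hunk above) probes a specific daemon by overlaying DOCKER_* variables on the inherited environment before running `docker version`. A standalone sketch of that technique (the DOCKER_HOST value is a made-up example):

```go
// Sketch of probing a docker daemon via env-var overlay. os/exec uses the
// last value for any duplicated key in Env, so appending to os.Environ()
// effectively overrides the inherited variables.
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func tryDockerConnectivity(envOverlay []string) ([]byte, error) {
	c := exec.Command("docker", "version", "--format={{.Server}}")
	c.Env = append(os.Environ(), envOverlay...) // later entries win on duplicates
	return c.CombinedOutput()
}

func main() {
	out, err := tryDockerConnectivity([]string{"DOCKER_HOST=tcp://127.0.0.1:2376"})
	fmt.Printf("out=%s err=%v\n", out, err)
}
```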
@@ -22,8 +22,8 @@ import (
 	"os/exec"
 	"syscall"

-	"github.com/golang/glog"
 	"github.com/spf13/cobra"
+	"k8s.io/klog/v2"
 	"k8s.io/minikube/pkg/minikube/constants"
 	"k8s.io/minikube/pkg/minikube/mustload"
 	"k8s.io/minikube/pkg/minikube/node"

@@ -48,7 +48,7 @@ minikube kubectl -- get pods --namespace kube-system`,
 			out.ErrLn("Error caching kubectl: %v", err)
 		}

-		glog.Infof("Running %s %v", c.Path, args)
+		klog.Infof("Running %s %v", c.Path, args)
 		c.Stdin = os.Stdin
 		c.Stdout = os.Stdout
 		c.Stderr = os.Stderr
@@ -27,9 +27,9 @@ import (
 	"sync"
 	"syscall"

-	"github.com/golang/glog"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
+	"k8s.io/klog/v2"
 	"k8s.io/minikube/pkg/minikube/cluster"
 	"k8s.io/minikube/pkg/minikube/driver"
 	"k8s.io/minikube/pkg/minikube/exit"

@@ -98,7 +98,7 @@ var mountCmd = &cobra.Command{
 			exit.Message(reason.Usage, "Target directory {{.path}} must be an absolute path", out.V{"path": vmPath})
 		}
 		var debugVal int
-		if glog.V(1) {
+		if klog.V(1).Enabled() {
 			debugVal = 1 // ufs.StartServer takes int debug param
 		}

@@ -110,7 +110,7 @@ var mountCmd = &cobra.Command{
 		var ip net.IP
 		var err error
 		if mountIP == "" {
-			ip, err = cluster.HostIP(co.CP.Host)
+			ip, err = cluster.HostIP(co.CP.Host, co.Config.Name)
 			if err != nil {
 				exit.Error(reason.IfHostIP, "Error getting the host IP address to use from within the VM", err)
 			}
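Note: one hunk above is more than a package rename. glog.V(1) was directly usable as a boolean condition, while klog/v2's V() returns a Verbose value whose check is spelled klog.V(1).Enabled(). A tiny runnable sketch:

```go
// Shows the glog.V(1) -> klog.V(1).Enabled() spelling change from the
// mount.go hunk; everything else about verbosity levels works the same.
package main

import "k8s.io/klog/v2"

func main() {
	klog.InitFlags(nil)
	debugVal := 0
	if klog.V(1).Enabled() { // true only when run with -v=1 or higher
		debugVal = 1 // matches the diff: ufs.StartServer takes an int debug param
	}
	klog.Infof("debugVal=%d", debugVal)
	klog.Flush()
}
```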
@@ -20,8 +20,8 @@ import (
 	"fmt"
 	"os"

-	"github.com/golang/glog"
 	"github.com/spf13/cobra"
+	"k8s.io/klog/v2"
 	"k8s.io/minikube/pkg/minikube/driver"
 	"k8s.io/minikube/pkg/minikube/exit"
 	"k8s.io/minikube/pkg/minikube/mustload"

@@ -41,9 +41,9 @@ var nodeListCmd = &cobra.Command{
 		_, cc := mustload.Partial(cname)

 		if len(cc.Nodes) < 1 {
-			glog.Warningf("Did not found any minikube node.")
+			klog.Warningf("Did not found any minikube node.")
 		} else {
-			glog.Infof("%v", cc.Nodes)
+			klog.Infof("%v", cc.Nodes)
 		}

 		for _, n := range cc.Nodes {
@@ -19,10 +19,10 @@ package cmd
 import (
 	"strings"

-	"github.com/golang/glog"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"

+	"k8s.io/klog/v2"
 	"k8s.io/minikube/pkg/minikube/cluster"
 	"k8s.io/minikube/pkg/minikube/constants"
 	"k8s.io/minikube/pkg/minikube/cruntime"

@@ -54,7 +54,7 @@ func runPause(cmd *cobra.Command, args []string) {
 	register.SetEventLogPath(localpath.EventLog(ClusterFlagValue()))
 	register.Reg.SetStep(register.Pausing)

-	glog.Infof("namespaces: %v keys: %v", namespaces, viper.AllSettings())
+	klog.Infof("namespaces: %v keys: %v", namespaces, viper.AllSettings())
 	if allNamespaces {
 		namespaces = nil // all
 	} else if len(namespaces) == 0 {
@@ -24,10 +24,10 @@ import (
 	"runtime"
 	"strings"

-	"github.com/golang/glog"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
 	"github.com/spf13/viper"
+	"k8s.io/klog/v2"
 	"k8s.io/kubectl/pkg/util/templates"
 	configCmd "k8s.io/minikube/cmd/minikube/cmd/config"
 	"k8s.io/minikube/pkg/drivers/kic/oci"

@@ -62,13 +62,6 @@ var RootCmd = &cobra.Command{
 				exit.Error(reason.HostHomeMkdir, "Error creating minikube directory", err)
 			}
 		}
-
-		logDir := pflag.Lookup("log_dir")
-		if !logDir.Changed {
-			if err := logDir.Value.Set(localpath.MakeMiniPath("logs")); err != nil {
-				exit.Error(reason.InternalFlagSet, "logdir set failed", err)
-			}
-		}
 	},
 }

@@ -83,16 +76,16 @@ func Execute() {
 	for _, c := range RootCmd.Commands() {
 		c.Short = translate.T(c.Short)
 		c.Long = translate.T(c.Long)
-		c.Flags().VisitAll(func(flag *pflag.Flag) {
-			flag.Usage = translate.T(flag.Usage)
+		c.Flags().VisitAll(func(f *pflag.Flag) {
+			f.Usage = translate.T(f.Usage)
 		})

 		c.SetUsageTemplate(usageTemplate())
 	}
 	RootCmd.Short = translate.T(RootCmd.Short)
 	RootCmd.Long = translate.T(RootCmd.Long)
-	RootCmd.Flags().VisitAll(func(flag *pflag.Flag) {
-		flag.Usage = translate.T(flag.Usage)
+	RootCmd.Flags().VisitAll(func(f *pflag.Flag) {
+		f.Usage = translate.T(f.Usage)
 	})

 	if runtime.GOOS != "windows" {

@@ -103,11 +96,11 @@ func Execute() {

 	// Universally ensure that we never speak to the wrong DOCKER_HOST
 	if err := oci.PointToHostDockerDaemon(); err != nil {
-		glog.Errorf("oci env: %v", err)
+		klog.Errorf("oci env: %v", err)
 	}

 	if err := oci.PointToHostPodman(); err != nil {
-		glog.Errorf("oci env: %v", err)
+		klog.Errorf("oci env: %v", err)
 	}

 	if err := RootCmd.Execute(); err != nil {

@@ -146,27 +139,7 @@ func usageTemplate() string {
 `, translate.T("Usage"), translate.T("Aliases"), translate.T("Examples"), translate.T("Available Commands"), translate.T("Flags"), translate.T("Global Flags"), translate.T("Additional help topics"), translate.T(`Use "{{.CommandPath}} [command] --help" for more information about a command.`))
 }

-// Handle config values for flags used in external packages (e.g. glog)
-// by setting them directly, using values from viper when not passed in as args
-func setFlagsUsingViper() {
-	for _, config := range []string{"alsologtostderr", "log_dir", "v"} {
-		a := pflag.Lookup(config)
-		viper.SetDefault(a.Name, a.DefValue)
-		// If the flag is set, override viper value
-		if a.Changed {
-			viper.Set(a.Name, a.Value.String())
-		}
-		// Viper will give precedence first to calls to the Set command,
-		// then to values from the config.yml
-		if err := a.Value.Set(viper.GetString(a.Name)); err != nil {
-			exit.Error(reason.InternalFlagSet, fmt.Sprintf("failed to set value for %q", a.Name), err)
-		}
-		a.Changed = true
-	}
-}
-
 func init() {
-	translate.DetermineLocale()
 	RootCmd.PersistentFlags().StringP(config.ProfileName, "p", constants.DefaultClusterName, `The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently.`)
 	RootCmd.PersistentFlags().StringP(configCmd.Bootstrapper, "b", "kubeadm", "The name of the cluster bootstrapper that will set up the Kubernetes cluster.")

@@ -234,10 +207,20 @@ func init() {
 	RootCmd.AddCommand(completionCmd)
 	templates.ActsAsRootCommand(RootCmd, []string{"options"}, groups...)

+	klog.InitFlags(nil)
+	if err := goflag.Set("logtostderr", "false"); err != nil {
+		klog.Warningf("Unable to set default flag value for logtostderr: %v", err)
+	}
+	if err := goflag.Set("alsologtostderr", "false"); err != nil {
+		klog.Warningf("Unable to set default flag value for alsologtostderr: %v", err)
+	}
+
 	pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
 	if err := viper.BindPFlags(RootCmd.PersistentFlags()); err != nil {
 		exit.Error(reason.InternalBindFlags, "Unable to bind flags", err)
 	}
+
+	translate.DetermineLocale()
 	cobra.OnInitialize(initConfig)
 }

@@ -249,7 +232,7 @@ func initConfig() {
 	if err := viper.ReadInConfig(); err != nil {
 		// This config file is optional, so don't emit errors if missing
 		if _, ok := err.(viper.ConfigFileNotFoundError); !ok {
-			glog.Warningf("Error reading config file at %s: %v", configPath, err)
+			klog.Warningf("Error reading config file at %s: %v", configPath, err)
 		}
 	}
 	setupViper()

@@ -270,11 +253,10 @@ func setupViper() {
 	viper.SetDefault(config.WantNoneDriverWarning, true)
 	viper.SetDefault(config.ShowDriverDeprecationNotification, true)
 	viper.SetDefault(config.ShowBootstrapperDeprecationNotification, true)
-	setFlagsUsingViper()
 }

 func addToPath(dir string) {
 	new := fmt.Sprintf("%s:%s", dir, os.Getenv("PATH"))
-	glog.Infof("Updating PATH: %s", dir)
+	klog.Infof("Updating PATH: %s", dir)
 	os.Setenv("PATH", new)
 }
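Note: the init() changes above wire klog into cobra's flag handling: register klog's flags on the standard library flag set, default its stderr output off, then merge that set into pflag so cobra exposes -v, --log_dir, and friends. A trimmed, runnable sketch of just that wiring, outside cobra:

```go
// Sketch of the klog/pflag wiring pattern adopted in root.go's init().
package main

import (
	goflag "flag"

	"github.com/spf13/pflag"
	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil) // register klog flags on goflag.CommandLine
	if err := goflag.Set("logtostderr", "false"); err != nil {
		klog.Warningf("Unable to set default flag value for logtostderr: %v", err)
	}
	pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) // expose them via pflag
	pflag.Parse()

	klog.Info("flags wired") // controlled by -v / -logtostderr as usual
	klog.Flush()
}
```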
@@ -19,99 +19,19 @@ package cmd
 import (
 	"bytes"
 	"os"
 	"strings"
 	"testing"

-	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
-	"github.com/spf13/pflag"
-	"github.com/spf13/viper"
 	"k8s.io/minikube/pkg/minikube/tests"
 )

-type configTest struct {
-	Name          string
-	EnvValue      string
-	ConfigValue   string
-	FlagValue     string
-	ExpectedValue string
-}
-
-var configTests = []configTest{
-	{
-		Name:          "v",
-		ExpectedValue: "0",
-	},
-	{
-		Name:          "v",
-		ConfigValue:   `{ "v":"999" }`,
-		ExpectedValue: "999",
-	},
-	{
-		Name:          "v",
-		FlagValue:     "0",
-		ExpectedValue: "0",
-	},
-	{
-		Name:          "v",
-		EnvValue:      "123",
-		ExpectedValue: "123",
-	},
-	{
-		Name:          "v",
-		FlagValue:     "3",
-		ExpectedValue: "3",
-	},
-	// Flag should override config and env
-	{
-		Name:          "v",
-		FlagValue:     "3",
-		ConfigValue:   `{ "v": "222" }`,
-		EnvValue:      "888",
-		ExpectedValue: "3",
-	},
-	// Env should override config
-	{
-		Name:          "v",
-		EnvValue:      "2",
-		ConfigValue:   `{ "v": "999" }`,
-		ExpectedValue: "2",
-	},
-	// Env should not override flags not on whitelist
-	{
-		Name:          "log_backtrace_at",
-		EnvValue:      ":2",
-		ExpectedValue: ":0",
-	},
-}
-
 func runCommand(f func(*cobra.Command, []string)) {
 	cmd := cobra.Command{}
 	var args []string
 	f(&cmd, args)
 }

-// Temporarily unsets the env variables for the test cases.
-// Returns a function to reset them to their initial values.
-func hideEnv(t *testing.T) func(t *testing.T) {
-	envs := make(map[string]string)
-	for _, env := range os.Environ() {
-		if strings.HasPrefix(env, minikubeEnvPrefix) {
-			line := strings.Split(env, "=")
-			key, val := line[0], line[1]
-			envs[key] = val
-			t.Logf("TestConfig: Unsetting %s=%s for unit test!", key, val)
-			os.Unsetenv(key)
-		}
-	}
-	return func(t *testing.T) {
-		for key, val := range envs {
-			t.Logf("TestConfig: Finished test, Resetting Env %s=%s", key, val)
-			os.Setenv(key, val)
-		}
-	}
-}
-
 func TestPreRunDirectories(t *testing.T) {
 	// Make sure we create the required directories.
 	tempDir := tests.MakeTempDir()

@@ -140,60 +60,3 @@ func TestViperConfig(t *testing.T) {
 		t.Fatalf("Viper did not read test config file: %v", err)
 	}
 }
-
-func getEnvVarName(name string) string {
-	return minikubeEnvPrefix + "_" + strings.ToUpper(name)
-}
-
-func setValues(tt configTest) error {
-	if tt.FlagValue != "" {
-		if err := pflag.Set(tt.Name, tt.FlagValue); err != nil {
-			return errors.Wrap(err, "flag set")
-		}
-	}
-	if tt.EnvValue != "" {
-		s := strings.Replace(getEnvVarName(tt.Name), "-", "_", -1)
-		os.Setenv(s, tt.EnvValue)
-	}
-	if tt.ConfigValue != "" {
-		if err := initTestConfig(tt.ConfigValue); err != nil {
-			return errors.Wrapf(err, "Config %s not read correctly", tt.ConfigValue)
-		}
-	}
-	return nil
-}
-
-func unsetValues(name string) error {
-	f := pflag.Lookup(name)
-	if err := f.Value.Set(f.DefValue); err != nil {
-		return errors.Wrapf(err, "set(%s)", f.DefValue)
-	}
-	f.Changed = false
-	os.Unsetenv(getEnvVarName(name))
-	viper.Reset()
-	return nil
-}
-
-func TestViperAndFlags(t *testing.T) {
-	restore := hideEnv(t)
-	defer restore(t)
-	for _, tt := range configTests {
-		err := setValues(tt)
-		if err != nil {
-			t.Fatalf("setValues: %v", err)
-		}
-		setupViper()
-		f := pflag.Lookup(tt.Name)
-		if f == nil {
-			t.Fatalf("Could not find flag for %s", tt.Name)
-		}
-		actual := f.Value.String()
-		if actual != tt.ExpectedValue {
-			t.Errorf("pflag.Value(%s) => %s, wanted %s [%+v]", tt.Name, actual, tt.ExpectedValue, tt)
-		}
-		// Some flag validation may not accept their default value, such as log_at_backtrace :(
-		if err := unsetValues(tt.Name); err != nil {
-			t.Logf("unsetValues(%s) failed: %v", tt.Name, err)
-		}
-	}
-}
@@ -29,9 +29,9 @@ import (
"text/template"
"time"

"github.com/golang/glog"
"github.com/spf13/cobra"

"k8s.io/klog/v2"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/kapi"
"k8s.io/minikube/pkg/minikube/browser"

@@ -154,7 +154,7 @@ func openURLs(svc string, urls []string) {
for _, u := range urls {
_, err := url.Parse(u)
if err != nil {
glog.Warningf("failed to parse url %q: %v (will not open)", u, err)
klog.Warningf("failed to parse url %q: %v (will not open)", u, err)
out.String(fmt.Sprintf("%s\n", u))
continue
}
@@ -67,6 +67,6 @@ var sshCmd = &cobra.Command{
}

func init() {
sshCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.")
sshCmd.Flags().BoolVar(&nativeSSHClient, "native-ssh", true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.")
sshCmd.Flags().StringVarP(&nodeName, "node", "n", "", "The node to ssh into. Defaults to the primary control plane.")
}
@@ -30,7 +30,6 @@ import (

"github.com/blang/semver"
"github.com/docker/machine/libmachine/ssh"
"github.com/golang/glog"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/remote"

@@ -40,6 +39,7 @@ import (
"github.com/spf13/cobra"
"github.com/spf13/viper"

"k8s.io/klog/v2"
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"

@@ -100,17 +100,17 @@ func platform() string {
hi, err := gopshost.Info()
if err == nil {
s.WriteString(fmt.Sprintf("%s %s", strings.Title(hi.Platform), hi.PlatformVersion))
glog.Infof("hostinfo: %+v", hi)
klog.Infof("hostinfo: %+v", hi)
} else {
glog.Warningf("gopshost.Info returned error: %v", err)
klog.Warningf("gopshost.Info returned error: %v", err)
s.WriteString(runtime.GOOS)
}

vsys, vrole, err := gopshost.Virtualization()
if err != nil {
glog.Warningf("gopshost.Virtualization returned error: %v", err)
klog.Warningf("gopshost.Virtualization returned error: %v", err)
} else {
glog.Infof("virtualization: %s %s", vsys, vrole)
klog.Infof("virtualization: %s %s", vsys, vrole)
}

// This environment is exotic, let's output a bit more.

@@ -155,7 +155,7 @@ func runStart(cmd *cobra.Command, args []string) {

if !config.ProfileNameValid(ClusterFlagValue()) {
out.WarningT("Profile name '{{.name}}' is not valid", out.V{"name": ClusterFlagValue()})
exit.Message(reason.Usage, "Only alphanumeric and dashes '-' are permitted. Minimum 1 character, starting with alphanumeric.")
exit.Message(reason.Usage, "Only alphanumeric and dashes '-' are permitted. Minimum 2 characters, starting with alphanumeric.")
}

existing, err := config.Load(ClusterFlagValue())

@@ -197,7 +197,7 @@ func runStart(cmd *cobra.Command, args []string) {
machine.MaybeDisplayAdvice(err, ds.Name)
if specified {
// If the user specified a driver, don't fallback to anything else
exit.Error(reason.GuestProvision, "error provisioning host", err)
exitGuestProvision(err)
} else {
success := false
// Walk down the rest of the options

@@ -207,7 +207,7 @@ func runStart(cmd *cobra.Command, args []string) {
// Delete the existing cluster and try again with the next driver on the list
profile, err := config.LoadProfile(ClusterFlagValue())
if err != nil {
glog.Warningf("%s profile does not exist, trying anyways.", ClusterFlagValue())
klog.Warningf("%s profile does not exist, trying anyways.", ClusterFlagValue())
}

err = deleteProfile(profile)

@@ -224,19 +224,22 @@ func runStart(cmd *cobra.Command, args []string) {
}
}
if !success {
exit.Error(reason.GuestProvision, "error provisioning host", err)
exitGuestProvision(err)
}
}
}

if existing != nil && driver.IsKIC(existing.Driver) {
if viper.GetBool(createMount) {
mount := viper.GetString(mountString)
if len(existing.ContainerVolumeMounts) != 1 || existing.ContainerVolumeMounts[0] != mount {
old := ""
if len(existing.ContainerVolumeMounts) > 0 {
old = existing.ContainerVolumeMounts[0]
}
if mount := viper.GetString(mountString); old != mount {
exit.Message(reason.GuestMountConflict, "Sorry, {{.driver}} does not allow mounts to be changed after container creation (previous mount: '{{.old}}', new mount: '{{.new}})'", out.V{
"driver": existing.Driver,
"new": mount,
"old": existing.ContainerVolumeMounts[0],
"old": old,
})
}
}

@@ -248,7 +251,7 @@ func runStart(cmd *cobra.Command, args []string) {
stopProfile(existing.Name)
starter, err = provisionWithDriver(cmd, ds, existing)
if err != nil {
exit.Error(reason.GuestProvision, "error provisioning host", err)
exitGuestProvision(err)
}
}
}

@@ -260,17 +263,17 @@ func runStart(cmd *cobra.Command, args []string) {
}

if err := showKubectlInfo(kubeconfig, starter.Node.KubernetesVersion, starter.Cfg.Name); err != nil {
glog.Errorf("kubectl info: %v", err)
klog.Errorf("kubectl info: %v", err)
}
}

func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing *config.ClusterConfig) (node.Starter, error) {
driverName := ds.Name
glog.Infof("selected driver: %s", driverName)
klog.Infof("selected driver: %s", driverName)
validateDriver(ds, existing)
err := autoSetDriverOptions(cmd, driverName)
if err != nil {
glog.Errorf("Error autoSetOptions : %v", err)
klog.Errorf("Error autoSetOptions : %v", err)
}

validateFlags(cmd, driverName)

@@ -455,7 +458,7 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st

cluster := semver.MustParse(strings.TrimPrefix(k8sVersion, version.VersionPrefix))
minorSkew := int(math.Abs(float64(int(client.Minor) - int(cluster.Minor))))
glog.Infof("kubectl: %s, cluster: %s (minor skew: %d)", client, cluster, minorSkew)
klog.Infof("kubectl: %s, cluster: %s (minor skew: %d)", client, cluster, minorSkew)

if client.Major != cluster.Major || minorSkew > 1 {
out.Ln("")

@@ -611,19 +614,19 @@ func hostDriver(existing *config.ClusterConfig) string {
}
api, err := machine.NewAPIClient()
if err != nil {
glog.Warningf("selectDriver NewAPIClient: %v", err)
klog.Warningf("selectDriver NewAPIClient: %v", err)
return existing.Driver
}

cp, err := config.PrimaryControlPlane(existing)
if err != nil {
glog.Warningf("Unable to get control plane from existing config: %v", err)
klog.Warningf("Unable to get control plane from existing config: %v", err)
return existing.Driver
}
machineName := driver.MachineName(*existing, cp)
h, err := api.Load(machineName)
if err != nil {
glog.Warningf("api.Load failed for %s: %v", machineName, err)
klog.Warningf("api.Load failed for %s: %v", machineName, err)
if existing.VMDriver != "" {
return existing.VMDriver
}

@@ -674,7 +677,7 @@ func validateSpecifiedDriver(existing *config.ClusterConfig) {
// validateDriver validates that the selected driver appears sane, exits if not
func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
name := ds.Name
glog.Infof("validating driver %q against %+v", name, existing)
klog.Infof("validating driver %q against %+v", name, existing)
if !driver.Supported(name) {
exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS})
}

@@ -685,7 +688,7 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
}

st := ds.State
glog.Infof("status for %s: %+v", name, st)
klog.Infof("status for %s: %+v", name, st)

if st.NeedsImprovement {
out.T(style.Improvement, `For improved {{.driver}} performance, {{.fix}}`, out.V{"driver": driver.FullName(ds.Name), "fix": translate.T(st.Fix)})

@@ -725,7 +728,7 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
func selectImageRepository(mirrorCountry string, v semver.Version) (bool, string, error) {
var tryCountries []string
var fallback string
glog.Infof("selecting image repository for country %s ...", mirrorCountry)
klog.Infof("selecting image repository for country %s ...", mirrorCountry)

if mirrorCountry != "" {
localRepos, ok := constants.ImageRepositories[mirrorCountry]

@@ -777,7 +780,7 @@ func selectImageRepository(mirrorCountry string, v semver.Version) (bool, string
func validateUser(drvName string) {
u, err := user.Current()
if err != nil {
glog.Errorf("Error getting the current user: %v", err)
klog.Errorf("Error getting the current user: %v", err)
return
}

@@ -794,7 +797,7 @@ func validateUser(drvName string) {

out.ErrT(style.Stopped, `The "{{.driver_name}}" driver should not be used with root privileges.`, out.V{"driver_name": drvName})
out.ErrT(style.Tip, "If you are running minikube within a VM, consider using --driver=none:")
out.ErrT(style.Documentation, " https://minikube.sigs.k8s.io/docs/reference/drivers/none/")
out.ErrT(style.Documentation, " {{.url}}", out.V{"url": "https://minikube.sigs.k8s.io/docs/reference/drivers/none/"})

cname := ClusterFlagValue()
_, err = config.Load(cname)

@@ -811,10 +814,10 @@ func validateUser(drvName string) {
func memoryLimits(drvName string) (int, int, error) {
info, cpuErr, memErr, diskErr := machine.CachedHostInfo()
if cpuErr != nil {
glog.Warningf("could not get system cpu info while verifying memory limits, which might be okay: %v", cpuErr)
klog.Warningf("could not get system cpu info while verifying memory limits, which might be okay: %v", cpuErr)
}
if diskErr != nil {
glog.Warningf("could not get system disk info while verifying memory limits, which might be okay: %v", diskErr)
klog.Warningf("could not get system disk info while verifying memory limits, which might be okay: %v", diskErr)
}

if memErr != nil {

@@ -829,7 +832,7 @@ func memoryLimits(drvName string) (int, int, error) {
if err != nil {
return -1, -1, err
}
containerLimit = int(s.TotalMemory / 1024 / 1024)
containerLimit = util.ConvertBytesToMB(s.TotalMemory)
}

return sysLimit, containerLimit, nil
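The replacement of the hand-rolled division above with util.ConvertBytesToMB is the same MB-vs-MiB distinction the nearby TODO complains about: dividing by 1024 twice yields binary mebibytes, not decimal megabytes. A minimal sketch of the gap (the exact behavior of minikube's ConvertBytesToMB is assumed here, not quoted from its source):

    package main

    import "fmt"

    func main() {
    	var total uint64 = 8 * 1024 * 1024 * 1024 // 8 GiB of RAM, in bytes

    	mib := int(total / 1024 / 1024) // what the old code computed (MiB)
    	mb := int(total / 1000 / 1000)  // decimal megabytes (MB)

    	fmt.Println(mib) // 8192
    	fmt.Println(mb)  // 8589
    }

For an 8 GiB host the two conventions differ by almost 400 "MB", which is why centralizing the conversion in one helper matters for memory validation.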
@@ -878,7 +881,7 @@ func validateRequestedMemorySize(req int, drvName string) {
// TODO: Fix MB vs MiB confusion
sysLimit, containerLimit, err := memoryLimits(drvName)
if err != nil {
glog.Warningf("Unable to query memory limits: %v", err)
klog.Warningf("Unable to query memory limits: %v", err)
}

// Detect if their system doesn't have enough memory to work with.

@@ -935,7 +938,7 @@ func validateCPUCount(drvName string) {
// Uses the gopsutil cpu package to count the number of physical cpu cores
ci, err := cpu.Counts(false)
if err != nil {
glog.Warningf("Unable to get CPU info: %v", err)
klog.Warningf("Unable to get CPU info: %v", err)
} else {
cpuCount = ci
}

@@ -1085,7 +1088,7 @@ func validateRegistryMirror() {
for _, loc := range registryMirror {
URL, err := url.Parse(loc)
if err != nil {
glog.Errorln("Error Parsing URL: ", err)
klog.Errorln("Error Parsing URL: ", err)
}
if (URL.Scheme != "http" && URL.Scheme != "https") || URL.Path != "" {
exit.Message(reason.Usage, "Sorry, the url provided with the --registry-mirror flag is invalid: {{.url}}", out.V{"url": loc})

@@ -1134,10 +1137,10 @@ func autoSetDriverOptions(cmd *cobra.Command, drvName string) (err error) {
if len(hints.ExtraOptions) > 0 {
for _, eo := range hints.ExtraOptions {
if config.ExtraOptions.Exists(eo) {
glog.Infof("skipping extra-config %q.", eo)
klog.Infof("skipping extra-config %q.", eo)
continue
}
glog.Infof("auto setting extra-config to %q.", eo)
klog.Infof("auto setting extra-config to %q.", eo)
err = config.ExtraOptions.Set(eo)
if err != nil {
err = errors.Wrapf(err, "setting extra option %s", eo)

@@ -1151,12 +1154,12 @@ func autoSetDriverOptions(cmd *cobra.Command, drvName string) (err error) {

if !cmd.Flags().Changed(containerRuntime) && hints.ContainerRuntime != "" {
viper.Set(containerRuntime, hints.ContainerRuntime)
glog.Infof("auto set %s to %q.", containerRuntime, hints.ContainerRuntime)
klog.Infof("auto set %s to %q.", containerRuntime, hints.ContainerRuntime)
}

if !cmd.Flags().Changed(cmdcfg.Bootstrapper) && hints.Bootstrapper != "" {
viper.Set(cmdcfg.Bootstrapper, hints.Bootstrapper)
glog.Infof("auto set %s to %q.", cmdcfg.Bootstrapper, hints.Bootstrapper)
klog.Infof("auto set %s to %q.", cmdcfg.Bootstrapper, hints.Bootstrapper)

}

@@ -1190,7 +1193,7 @@ func validateKubernetesVersion(old *config.ClusterConfig) {

ovs, err := semver.Make(strings.TrimPrefix(old.KubernetesConfig.KubernetesVersion, version.VersionPrefix))
if err != nil {
glog.Errorf("Error parsing old version %q: %v", old.KubernetesConfig.KubernetesVersion, err)
klog.Errorf("Error parsing old version %q: %v", old.KubernetesConfig.KubernetesVersion, err)
}

if nvs.LT(ovs) {

@@ -1246,7 +1249,7 @@ func validateDockerStorageDriver(drvName string) {
}
si, err := oci.DaemonInfo(drvName)
if err != nil {
glog.Warningf("Unable to confirm that %s is using overlay2 storage driver; setting preload=false", drvName)
klog.Warningf("Unable to confirm that %s is using overlay2 storage driver; setting preload=false", drvName)
viper.Set(preload, false)
return
}

@@ -1263,3 +1266,10 @@ func exitIfNotForced(r reason.Kind, message string, v ...out.V) {
}
out.Error(r, message, v...)
}

func exitGuestProvision(err error) {
if errors.Cause(err) == oci.ErrInsufficientDockerStorage {
exit.Message(reason.RsrcInsufficientDockerStorage, "preload extraction failed: \"No space left on device\"")
}
exit.Error(reason.GuestProvision, "error provisioning host", err)
}
@@ -22,10 +22,10 @@ import (
"time"

"github.com/blang/semver"
"github.com/golang/glog"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/drivers/kic"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"

@@ -107,6 +107,7 @@ const (
forceSystemd = "force-systemd"
kicBaseImage = "base-image"
startOutput = "output"
ports = "ports"
)

// initMinikubeFlags includes commandline flags for minikube.

@@ -197,6 +198,9 @@ func initDriverFlags() {
startCmd.Flags().String(hypervVirtualSwitch, "", "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)")
startCmd.Flags().Bool(hypervUseExternalSwitch, false, "Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)")
startCmd.Flags().String(hypervExternalAdapter, "", "External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)")

// docker & podman
startCmd.Flags().StringSlice(ports, []string{}, "List of ports that should be exposed (docker and podman driver only)")
}

// initNetworkingFlags inits the commandline flags for connectivity related flags for start

@@ -221,10 +225,10 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
if existing != nil {
cc = updateExistingConfigFromFlags(cmd, existing)
} else {
glog.Info("no existing cluster config was found, will generate one from the flags ")
klog.Info("no existing cluster config was found, will generate one from the flags ")
sysLimit, containerLimit, err := memoryLimits(drvName)
if err != nil {
glog.Warningf("Unable to query memory limits: %+v", err)
klog.Warningf("Unable to query memory limits: %+v", err)
}

mem := suggestMemoryAllocation(sysLimit, containerLimit, viper.GetInt(nodes))

@@ -239,7 +243,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
}
} else {
validateRequestedMemorySize(mem, drvName)
glog.Infof("Using suggested %dMB memory alloc based on sys=%dMB, container=%dMB", mem, sysLimit, containerLimit)
klog.Infof("Using suggested %dMB memory alloc based on sys=%dMB, container=%dMB", mem, sysLimit, containerLimit)
}

diskSize, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))

@@ -273,7 +277,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
// Backwards compatibility with --enable-default-cni
chosenCNI := viper.GetString(cniFlag)
if viper.GetBool(enableDefaultCNI) && !cmd.Flags().Changed(cniFlag) {
glog.Errorf("Found deprecated --enable-default-cni flag, setting --cni=bridge")
klog.Errorf("Found deprecated --enable-default-cni flag, setting --cni=bridge")
chosenCNI = "bridge"
}

@@ -311,6 +315,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
HostOnlyNicType: viper.GetString(hostOnlyNicType),
NatNicType: viper.GetString(natNicType),
StartHostTimeout: viper.GetDuration(waitTimeout),
ExposedPorts: viper.GetStringSlice(ports),
KubernetesConfig: config.KubernetesConfig{
KubernetesVersion: k8sVersion,
ClusterName: ClusterFlagValue(),

@@ -340,12 +345,12 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
}

if _, ok := cnm.(cni.Disabled); !ok {
glog.Infof("Found %q CNI - setting NetworkPlugin=cni", cnm)
klog.Infof("Found %q CNI - setting NetworkPlugin=cni", cnm)
cc.KubernetesConfig.NetworkPlugin = "cni"
}
}

glog.Infof("config:\n%+v", cc)
klog.Infof("config:\n%+v", cc)

r, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime})
if err != nil {

@@ -372,19 +377,19 @@ func upgradeExistingConfig(cc *config.ClusterConfig) {
}

if cc.VMDriver != "" && cc.Driver == "" {
glog.Infof("config upgrade: Driver=%s", cc.VMDriver)
klog.Infof("config upgrade: Driver=%s", cc.VMDriver)
cc.Driver = cc.VMDriver
}

if cc.Name == "" {
glog.Infof("config upgrade: Name=%s", ClusterFlagValue())
klog.Infof("config upgrade: Name=%s", ClusterFlagValue())
cc.Name = ClusterFlagValue()
}

if cc.KicBaseImage == "" {
// defaults to kic.BaseImage
cc.KicBaseImage = viper.GetString(kicBaseImage)
glog.Infof("config upgrade: KicBaseImage=%s", cc.KicBaseImage)
klog.Infof("config upgrade: KicBaseImage=%s", cc.KicBaseImage)
}
}

@@ -413,10 +418,10 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC
}

if cc.Memory == 0 {
glog.Info("Existing config file was missing memory. (could be an old minikube config), will use the default value")
klog.Info("Existing config file was missing memory. (could be an old minikube config), will use the default value")
memInMB, err := pkgutil.CalculateSizeInMB(viper.GetString(memory))
if err != nil {
glog.Warningf("error calculate memory size in mb : %v", err)
klog.Warningf("error calculate memory size in mb : %v", err)
}
cc.Memory = memInMB
}

@@ -424,7 +429,7 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC
if cmd.Flags().Changed(memory) {
memInMB, err := pkgutil.CalculateSizeInMB(viper.GetString(memory))
if err != nil {
glog.Warningf("error calculate memory size in mb : %v", err)
klog.Warningf("error calculate memory size in mb : %v", err)
}
if memInMB != cc.Memory {
out.WarningT("You cannot change the memory size for an exiting minikube cluster. Please first delete the cluster.")

@@ -435,7 +440,7 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC
validateRequestedMemorySize(cc.Memory, cc.Driver)

if cc.CPUs == 0 {
glog.Info("Existing config file was missing cpu. (could be an old minikube config), will use the default value")
klog.Info("Existing config file was missing cpu. (could be an old minikube config), will use the default value")
cc.CPUs = viper.GetInt(cpus)
}
if cmd.Flags().Changed(cpus) {

@@ -447,7 +452,7 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC
if cmd.Flags().Changed(humanReadableDiskSize) {
memInMB, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
if err != nil {
glog.Warningf("error calculate disk size in mb : %v", err)
klog.Warningf("error calculate disk size in mb : %v", err)
}

if memInMB != existing.DiskSize {

@@ -547,6 +552,10 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC
cc.KubernetesConfig.NodePort = viper.GetInt(apiServerPort)
}

if cmd.Flags().Changed(vsockPorts) {
cc.ExposedPorts = viper.GetStringSlice(ports)
}

// pre minikube 1.9.2 cc.KubernetesConfig.NodePort was not populated.
// in minikube config there were two fields for api server port.
// one in cc.KubernetesConfig.NodePort and one in cc.Nodes.Port

@@ -589,7 +598,7 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC

if cmd.Flags().Changed(enableDefaultCNI) && !cmd.Flags().Changed(cniFlag) {
if viper.GetBool(enableDefaultCNI) {
glog.Errorf("Found deprecated --enable-default-cni flag, setting --cni=bridge")
klog.Errorf("Found deprecated --enable-default-cni flag, setting --cni=bridge")
cc.KubernetesConfig.CNI = "bridge"
}
}

@@ -614,25 +623,25 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC
// returns map of components to wait for
func interpretWaitFlag(cmd cobra.Command) map[string]bool {
if !cmd.Flags().Changed(waitComponents) {
glog.Infof("Wait components to verify : %+v", kverify.DefaultComponents)
klog.Infof("Wait components to verify : %+v", kverify.DefaultComponents)
return kverify.DefaultComponents
}

waitFlags, err := cmd.Flags().GetStringSlice(waitComponents)
if err != nil {
glog.Warningf("Failed to read --wait from flags: %v.\n Moving on will use the default wait components: %+v", err, kverify.DefaultComponents)
klog.Warningf("Failed to read --wait from flags: %v.\n Moving on will use the default wait components: %+v", err, kverify.DefaultComponents)
return kverify.DefaultComponents
}

if len(waitFlags) == 1 {
// respecting legacy flag before minikube 1.9.0, wait flag was boolean
if waitFlags[0] == "false" || waitFlags[0] == "none" {
glog.Infof("Waiting for no components: %+v", kverify.NoComponents)
klog.Infof("Waiting for no components: %+v", kverify.NoComponents)
return kverify.NoComponents
}
// respecting legacy flag before minikube 1.9.0, wait flag was boolean
if waitFlags[0] == "true" || waitFlags[0] == "all" {
glog.Infof("Waiting for all components: %+v", kverify.AllComponents)
klog.Infof("Waiting for all components: %+v", kverify.AllComponents)
return kverify.AllComponents
}
}

@@ -648,9 +657,9 @@ func interpretWaitFlag(cmd cobra.Command) map[string]bool {
}
}
if !seen {
glog.Warningf("The value %q is invalid for --wait flag. valid options are %q", wc, strings.Join(kverify.AllComponentsList, ","))
klog.Warningf("The value %q is invalid for --wait flag. valid options are %q", wc, strings.Join(kverify.AllComponentsList, ","))
}
}
glog.Infof("Waiting for components: %+v", waitComponents)
klog.Infof("Waiting for components: %+v", waitComponents)
return waitComponents
}
@@ -31,9 +31,9 @@ import (

"github.com/docker/machine/libmachine"
"github.com/docker/machine/libmachine/state"
"github.com/golang/glog"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
"k8s.io/minikube/pkg/minikube/cluster"
"k8s.io/minikube/pkg/minikube/config"

@@ -57,6 +57,7 @@ var (

const (
// Additional legacy states:

// Configured means configured
Configured = "Configured" // ~state.Saved
// Misconfigured means misconfigured

@@ -67,7 +68,9 @@ const (
Irrelevant = "Irrelevant"

// New status modes, based roughly on HTTP/SMTP standards

// 1xx signifies a transitional state. If retried, it will soon return a 2xx, 4xx, or 5xx
Starting = 100
Pausing = 101
Unpausing = 102

@@ -75,21 +78,29 @@ const (
Deleting = 120

// 2xx signifies that the API Server is able to service requests
OK = 200
Warning = 203

// 4xx signifies an error that requires help from the client to resolve
NotFound = 404
Stopped = 405
Paused = 418 // I'm a teapot!

// 5xx signifies a server-side error (that may be retryable)
Error = 500
InsufficientStorage = 507
Unknown = 520
)

var (
exitCodeToHTTPCode = map[int]int{
// exit code 26 corresponds to insufficient storage
26: 507,
}

codeNames = map[int]string{
100: "Starting",
101: "Pausing",
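The HTTP/SMTP-style numbering above makes coarse classification trivial: the hundreds digit alone says whether a status is transitional, healthy, client-fixable, or a server-side error. A minimal sketch of that idea (the bucket names here are illustrative, not taken from the minikube source):

    package main

    import "fmt"

    // classify buckets a status code by its hundreds digit, mirroring the
    // HTTP/SMTP-inspired scheme in the const block above.
    func classify(code int) string {
    	switch code / 100 {
    	case 1:
    		return "transitional (retry soon)"
    	case 2:
    		return "ok"
    	case 4:
    		return "needs client action"
    	case 5:
    		return "server-side error (may be retryable)"
    	default:
    		return "unrecognized"
    	}
    }

    func main() {
    	for _, c := range []int{100, 200, 405, 507} {
    		fmt.Println(c, classify(c)) // e.g. 405 needs client action
    	}
    }

This is also why exitCodeToHTTPCode exists: mapping exit code 26 onto 507 lets storage exhaustion surface through the same 5xx bucket as other server-side failures.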
@@ -202,21 +213,21 @@ var statusCmd = &cobra.Command{

st, err := nodeStatus(api, *cc, *n)
if err != nil {
glog.Errorf("status error: %v", err)
klog.Errorf("status error: %v", err)
}
statuses = append(statuses, st)
} else {
for _, n := range cc.Nodes {
machineName := driver.MachineName(*cc, n)
glog.Infof("checking status of %s ...", machineName)
klog.Infof("checking status of %s ...", machineName)
st, err := nodeStatus(api, *cc, n)
glog.Infof("%s status: %+v", machineName, st)
klog.Infof("%s status: %+v", machineName, st)

if err != nil {
glog.Errorf("status error: %v", err)
klog.Errorf("status error: %v", err)
}
if st.Host == Nonexistent {
glog.Errorf("The %q host does not exist!", machineName)
klog.Errorf("The %q host does not exist!", machineName)
}
statuses = append(statuses, st)
}

@@ -280,7 +291,7 @@ func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*St
}

hs, err := machine.Status(api, name)
glog.Infof("%s host status = %q (err=%v)", name, hs, err)
klog.Infof("%s host status = %q (err=%v)", name, hs, err)
if err != nil {
return st, errors.Wrap(err, "host")
}

@@ -293,7 +304,7 @@ func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*St

// If it's not running, quickly bail out rather than delivering conflicting messages
if st.Host != state.Running.String() {
glog.Infof("host is not running, skipping remaining checks")
klog.Infof("host is not running, skipping remaining checks")
st.APIServer = st.Host
st.Kubelet = st.Host
st.Kubeconfig = st.Host

@@ -302,7 +313,7 @@ func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*St

// We have a fully operational host, now we can check for details
if _, err := cluster.DriverIP(api, name); err != nil {
glog.Errorf("failed to get driver ip: %v", err)
klog.Errorf("failed to get driver ip: %v", err)
st.Host = state.Error.String()
return st, err
}

@@ -326,7 +337,7 @@ func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*St
// Check storage
p, err := machine.DiskUsed(cr, "/var")
if err != nil {
glog.Errorf("failed to get storage capacity of /var: %v", err)
klog.Errorf("failed to get storage capacity of /var: %v", err)
st.Host = state.Error.String()
return st, err
}

@@ -334,8 +345,7 @@ func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*St
st.Host = codeNames[InsufficientStorage]
}

stk := kverify.KubeletStatus(cr)
glog.Infof("%s kubelet status = %s", name, stk)
stk := kverify.ServiceStatus(cr, "kubelet")
st.Kubelet = stk.String()

// Early exit for worker nodes

@@ -345,21 +355,21 @@ func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*St

hostname, _, port, err := driver.ControlPlaneEndpoint(&cc, &n, host.DriverName)
if err != nil {
glog.Errorf("forwarded endpoint: %v", err)
klog.Errorf("forwarded endpoint: %v", err)
st.Kubeconfig = Misconfigured
} else {
err := kubeconfig.VerifyEndpoint(cc.Name, hostname, port)
if err != nil {
glog.Errorf("kubeconfig endpoint: %v", err)
klog.Errorf("kubeconfig endpoint: %v", err)
st.Kubeconfig = Misconfigured
}
}

sta, err := kverify.APIServerStatus(cr, hostname, port)
glog.Infof("%s apiserver status = %s (err=%v)", name, stk, err)
klog.Infof("%s apiserver status = %s (err=%v)", name, stk, err)

if err != nil {
glog.Errorln("Error apiserver status:", err)
klog.Errorln("Error apiserver status:", err)
st.APIServer = state.Error.String()
} else {
st.APIServer = sta.String()

@@ -442,14 +452,18 @@ func readEventLog(name string) ([]cloudevents.Event, time.Time, error) {

// clusterState converts Status structs into a ClusterState struct
func clusterState(sts []*Status) ClusterState {
sc := statusCode(sts[0].Host)
statusName := sts[0].APIServer
if sts[0].Host == codeNames[InsufficientStorage] {
statusName = sts[0].Host
}
sc := statusCode(statusName)
cs := ClusterState{
BinaryVersion: version.GetVersion(),

BaseState: BaseState{
Name: ClusterFlagValue(),
StatusCode: sc,
StatusName: sts[0].Host,
StatusName: statusName,
StatusDetail: codeDetails[sc],
},

@@ -485,7 +499,7 @@ func clusterState(sts []*Status) ClusterState {

evs, mtime, err := readEventLog(sts[0].Name)
if err != nil {
glog.Errorf("unable to read event log: %v", err)
klog.Errorf("unable to read event log: %v", err)
return cs
}

@@ -493,12 +507,12 @@ func clusterState(sts []*Status) ClusterState {
var finalStep map[string]string

for _, ev := range evs {
// glog.Infof("read event: %+v", ev)
// klog.Infof("read event: %+v", ev)
if ev.Type() == "io.k8s.sigs.minikube.step" {
var data map[string]string
err := ev.DataAs(&data)
if err != nil {
glog.Errorf("unable to parse data: %v\nraw data: %s", err, ev.Data())
klog.Errorf("unable to parse data: %v\nraw data: %s", err, ev.Data())
continue
}

@@ -508,7 +522,7 @@ func clusterState(sts []*Status) ClusterState {
case string(register.Done):
transientCode = 0
case string(register.Stopping):
glog.Infof("%q == %q", data["name"], register.Stopping)
klog.Infof("%q == %q", data["name"], register.Stopping)
transientCode = Stopping
case string(register.Deleting):
transientCode = Deleting

@@ -519,33 +533,36 @@ func clusterState(sts []*Status) ClusterState {
}

finalStep = data
glog.Infof("transient code %d (%q) for step: %+v", transientCode, codeNames[transientCode], data)
klog.Infof("transient code %d (%q) for step: %+v", transientCode, codeNames[transientCode], data)
}
if ev.Type() == "io.k8s.sigs.minikube.error" {
var data map[string]string
err := ev.DataAs(&data)
if err != nil {
glog.Errorf("unable to parse data: %v\nraw data: %s", err, ev.Data())
klog.Errorf("unable to parse data: %v\nraw data: %s", err, ev.Data())
continue
}
exitCode, err := strconv.Atoi(data["exitcode"])
if err != nil {
glog.Errorf("unable to convert exit code to int: %v", err)
klog.Errorf("unable to convert exit code to int: %v", err)
continue
}
if val, ok := exitCodeToHTTPCode[exitCode]; ok {
exitCode = val
}
transientCode = exitCode
for _, n := range cs.Nodes {
n.StatusCode = transientCode
n.StatusName = codeNames[n.StatusCode]
}

glog.Infof("transient code %d (%q) for step: %+v", transientCode, codeNames[transientCode], data)
klog.Infof("transient code %d (%q) for step: %+v", transientCode, codeNames[transientCode], data)
}
}

if finalStep != nil {
if mtime.Before(time.Now().Add(-10 * time.Minute)) {
glog.Warningf("event stream is too old (%s) to be considered a transient state", mtime)
klog.Warningf("event stream is too old (%s) to be considered a transient state", mtime)
} else {
cs.Step = strings.TrimSpace(finalStep["name"])
cs.StepDetail = strings.TrimSpace(finalStep["message"])
@@ -21,10 +21,10 @@ import (

"github.com/docker/machine/libmachine"
"github.com/docker/machine/libmachine/mcnerror"
"github.com/golang/glog"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"

@@ -48,9 +48,8 @@ var (
var stopCmd = &cobra.Command{
Use: "stop",
Short: "Stops a running local Kubernetes cluster",
Long: `Stops a local Kubernetes cluster running in Virtualbox. This command stops the VM
itself, leaving all files intact. The cluster can be started again with the "start" command.`,
Run: runStop,
Long: `Stops a local Kubernetes cluster. This command stops the underlying VM or container, but keeps user data intact. The cluster can be started again with the "start" command.`,
Run: runStop,
}

func init() {

@@ -60,8 +59,6 @@ func init() {
if err := viper.GetViper().BindPFlags(stopCmd.Flags()); err != nil {
exit.Error(reason.InternalFlagsBind, "unable to bind flags", err)
}

RootCmd.AddCommand(stopCmd)
}

// runStop handles the flow of "minikube stop"

@@ -74,7 +71,7 @@ func runStop(cmd *cobra.Command, args []string) {
if stopAll {
validProfiles, _, err := config.ListProfiles()
if err != nil {
glog.Warningf("'error loading profiles in minikube home %q: %v", localpath.MiniPath(), err)
klog.Warningf("'error loading profiles in minikube home %q: %v", localpath.MiniPath(), err)
}
for _, profile := range validProfiles {
profilesToStop = append(profilesToStop, profile.Name)

@@ -117,8 +114,8 @@ func stopProfile(profile string) int {
}

if !keepActive {
if err := kubeconfig.UnsetCurrentContext(profile, kubeconfig.PathFromEnv()); err != nil {
exit.Error(reason.HostKubeconfigUnset, "update config", err)
if err := kubeconfig.DeleteContext(profile, kubeconfig.PathFromEnv()); err != nil {
exit.Error(reason.HostKubeconfigDeleteCtx, "delete ctx", err)
}
}

@@ -133,7 +130,7 @@ func stop(api libmachine.API, machineName string) bool {
if err == nil {
return nil
}
glog.Warningf("stop host returned error: %v", err)
klog.Warningf("stop host returned error: %v", err)

switch err := errors.Cause(err).(type) {
case mcnerror.ErrHostDoesNotExist:
@@ -23,9 +23,9 @@ import (
"path/filepath"
"strconv"

"github.com/golang/glog"
"github.com/spf13/cobra"

"k8s.io/klog/v2"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/kapi"
"k8s.io/minikube/pkg/minikube/config"

@@ -54,9 +54,9 @@ var tunnelCmd = &cobra.Command{
co := mustload.Healthy(cname)

if cleanup {
glog.Info("Checking for tunnels to cleanup...")
klog.Info("Checking for tunnels to cleanup...")
if err := manager.CleanupNotRunningTunnels(); err != nil {
glog.Errorf("error cleaning up: %s", err)
klog.Errorf("error cleaning up: %s", err)
}
}
@@ -19,10 +19,10 @@ package cmd
import (
"strings"

"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/spf13/viper"

"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/cluster"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"

@@ -48,7 +48,7 @@ var unpauseCmd = &cobra.Command{
co := mustload.Running(cname)
register.Reg.SetStep(register.Unpausing)

glog.Infof("namespaces: %v keys: %v", namespaces, viper.AllSettings())
klog.Infof("namespaces: %v keys: %v", namespaces, viper.AllSettings())
if allNamespaces {
namespaces = nil // all
} else {

@@ -60,7 +60,7 @@ var unpauseCmd = &cobra.Command{
ids := []string{}

for _, n := range co.Config.Nodes {
glog.Infof("node: %+v", n)
klog.Infof("node: %+v", n)

// Use node-name if available, falling back to cluster name
name := n.Name
@@ -26,6 +26,8 @@ import (

// initflag must be imported before any other minikube pkg.
// Fix for https://github.com/kubernetes/minikube/issues/4866

"k8s.io/klog/v2"
_ "k8s.io/minikube/pkg/initflag"

// Register drivers

@@ -36,7 +38,6 @@ import (

mlog "github.com/docker/machine/libmachine/log"

"github.com/golang/glog"
"github.com/google/slowjam/pkg/stacklog"
"github.com/pkg/profile"

@@ -58,7 +59,7 @@ var (

func main() {
bridgeLogMessages()
defer glog.Flush()
defer klog.Flush()

s := stacklog.MustStartFromEnv("STACKLOG_PATH")
defer s.Stop()

@@ -74,7 +75,7 @@ func main() {
cmd.Execute()
}

// bridgeLogMessages bridges non-glog logs into glog
// bridgeLogMessages bridges non-glog logs into klog
func bridgeLogMessages() {
log.SetFlags(log.Lshortfile)
log.SetOutput(stdLogBridge{})

@@ -85,12 +86,12 @@ func bridgeLogMessages() {

type stdLogBridge struct{}

// Write parses the standard logging line and passes its components to glog
// Write parses the standard logging line and passes its components to klog
func (lb stdLogBridge) Write(b []byte) (n int, err error) {
// Split "d.go:23: message" into "d.go", "23", and "message".
parts := bytes.SplitN(b, []byte{':'}, 3)
if len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
glog.Errorf("bad log format: %s", b)
klog.Errorf("bad log format: %s", b)
return
}

@@ -101,21 +102,21 @@ func (lb stdLogBridge) Write(b []byte) (n int, err error) {
text = fmt.Sprintf("bad line number: %s", b)
line = 0
}
glog.Infof("stdlog: %s:%d %s", file, line, text)
klog.Infof("stdlog: %s:%d %s", file, line, text)
return len(b), nil
}
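Because log.Lshortfile makes the standard logger prefix every line with "file.go:line: ", the bridge can recover the call site with a three-way split on ':'. A standalone sketch of that parsing step (illustrative only, separate from the bridge itself):

    package main

    import (
    	"bytes"
    	"fmt"
    	"strconv"
    )

    // Demonstrates the split used by stdLogBridge.Write: with log.Lshortfile,
    // a standard-library log line looks like "d.go:23: message".
    func main() {
    	b := []byte("d.go:23: Docker daemon restarted")
    	parts := bytes.SplitN(b, []byte{':'}, 3)
    	file := string(parts[0])
    	line, err := strconv.Atoi(string(parts[1]))
    	if err != nil {
    		line = 0
    	}
    	text := string(bytes.TrimSpace(parts[2]))
    	fmt.Printf("stdlog: %s:%d %s\n", file, line, text) // stdlog: d.go:23 Docker daemon restarted
    }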
// libmachine log bridge
type machineLogBridge struct{}

// Write passes machine driver logs to glog
// Write passes machine driver logs to klog
func (lb machineLogBridge) Write(b []byte) (n int, err error) {
if machineLogErrorRe.Match(b) {
glog.Errorf("libmachine: %s", b)
klog.Errorf("libmachine: %s", b)
} else if machineLogWarningRe.Match(b) {
glog.Warningf("libmachine: %s", b)
klog.Warningf("libmachine: %s", b)
} else {
glog.Infof("libmachine: %s", b)
klog.Infof("libmachine: %s", b)
}
return len(b), nil
}
@@ -18,12 +18,17 @@ package main

import (
"context"
"fmt"
"log"
"time"

"github.com/pkg/errors"
"k8s.io/minikube/pkg/perf/monitor"
)

func main() {
for {
log.Print("~~~~~~~~~ Starting performance analysis ~~~~~~~~~~~~~~")
if err := analyzePerformance(context.Background()); err != nil {
log.Printf("error executing performance analysis: %v", err)
}

@@ -36,5 +41,32 @@ func main() {
// 2. running mkcmp against those PRs
// 3. commenting results on those PRs
func analyzePerformance(ctx context.Context) error {
client := monitor.NewClient(ctx, monitor.GithubOwner, monitor.GithubRepo)
prs, err := client.ListOpenPRsWithLabel(monitor.OkToTestLabel)
if err != nil {
return errors.Wrap(err, "listing open prs")
}
log.Print("got prs:", prs)
for _, pr := range prs {
log.Printf("~~~ Analyzing PR %d ~~~", pr)
newCommitsExist, err := client.NewCommitsExist(pr, monitor.BotName)
if err != nil {
return err
}
if !newCommitsExist {
log.Println("New commits don't exist, skipping rerun...")
continue
}
var message string
message, err = monitor.RunMkcmp(ctx, pr)
if err != nil {
message = fmt.Sprintf("Error: %v\n%s", err, message)
}
log.Printf("message for pr %d:\n%s\n", pr, message)
if err := client.CommentOnPR(pr, message); err != nil {
return err
}
log.Print("successfully commented on PR")
}
return nil
}
@@ -21,7 +21,7 @@ import (
"fmt"
"os"

"github.com/golang/glog"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/storage"
)

@@ -36,7 +36,7 @@ func main() {
flag.Parse()

if err := storage.StartStorageProvisioner(pvDir); err != nil {
glog.Exit(err)
klog.Exit(err)
}

}
@@ -86,6 +86,7 @@ spec:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
        gcp-auth-skip-secret: "true"
    spec:
      containers:
      - name: kubernetes-dashboard
@@ -90,7 +90,7 @@ data:

# Multi-line parsing is required for all the kube logs because very large log
# statements, such as those that include entire object bodies, get split into
# multiple lines by glog.
# multiple lines by klog.

# Example:
# I0204 07:32:30.020537    3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
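Each klog record opens with a fixed header (severity letter, date, time, PID, file:line) as in the example above; only a line matching that header starts a new record, which is what lets a multi-line parser stitch the continuation lines back together. A hedged Go sketch of such a matcher (the regex is an assumption derived from the sample line, not taken from the fluentd config):

    package main

    import (
    	"fmt"
    	"regexp"
    )

    // klogHeader matches the start of a klog record, e.g.
    // "I0204 07:32:30.020537    3368 server.go:1048] ..."
    var klogHeader = regexp.MustCompile(`^[IWEF]\d{4} \d{2}:\d{2}:\d{2}\.\d+\s+\d+\s+\S+:\d+\]`)

    func main() {
    	lines := []string{
    		"I0204 07:32:30.020537    3368 server.go:1048] POST /stats/container/: (13.972191ms) 200",
    		"  continuation of the previous record",
    	}
    	for _, l := range lines {
    		fmt.Println(klogHeader.MatchString(l), l) // true, then false
    	}
    }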
@@ -80,7 +80,7 @@ spec:
    spec:
      containers:
      - name: gcp-auth
        image: gcr.io/k8s-minikube/gcp-auth-webhook:v0.0.2
        image: gcr.io/k8s-minikube/gcp-auth-webhook:v0.0.3
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 8443

@@ -131,10 +131,17 @@ metadata:
    app: gcp-auth
webhooks:
- name: gcp-auth-mutate.k8s.io
  failurePolicy: Ignore
  objectSelector:
    matchExpressions:
    - key: gcp-auth-skip-secret
      operator: DoesNotExist
  namespaceSelector:
    matchExpressions:
    - key: name
      operator: NotIn
      values:
      - kube-system
  sideEffects: None
  admissionReviewVersions: ["v1","v1beta1"]
  clientConfig:
@@ -20,6 +20,7 @@ metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/minikube-addons: gvisor
    gcp-auth-skip-secret: "true"
spec:
  hostPID: true
  containers:
@@ -34,6 +34,7 @@ metadata:
    kubernetes.io/bootstrapping: rbac-defaults
    app.kubernetes.io/part-of: kube-system
    addonmanager.kubernetes.io/mode: Reconcile
    gcp-auth-skip-secret: "true"
rules:
- apiGroups:
  - ""
@@ -44,6 +44,7 @@ spec:
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/component: controller
        addonmanager.kubernetes.io/mode: Reconcile
        gcp-auth-skip-secret: "true"
    spec:
      serviceAccountName: ingress-nginx
      containers:
@@ -100,7 +100,7 @@ spec:
  hostNetwork: true
  containers:
  - name: storage-provisioner
    image: {{default "gcr.io/k8s-minikube" .ImageRepository}}/storage-provisioner{{.ExoticArch}}:{{.StorageProvisionerVersion}}
    image: {{default "gcr.io/k8s-minikube" .ImageRepository}}/storage-provisioner:{{.StorageProvisionerVersion}}
    command: ["/storage-provisioner"]
    imagePullPolicy: IfNotPresent
    volumeMounts:
@@ -4,8 +4,8 @@ BR2_OPTIMIZE_2=y
BR2_TOOLCHAIN_BUILDROOT_VENDOR="minikube"
BR2_TOOLCHAIN_BUILDROOT_GLIBC=y
BR2_PACKAGE_HOST_LINUX_HEADERS_CUSTOM_4_19=y
BR2_BINUTILS_VERSION_2_30_X=y
BR2_GCC_VERSION_7_X=y
BR2_BINUTILS_VERSION_2_32_X=y
BR2_GCC_VERSION_8_X=y
BR2_TOOLCHAIN_BUILDROOT_CXX=y
BR2_GCC_ENABLE_LTO=y
BR2_TARGET_GENERIC_HOSTNAME="minikube"

@@ -19,6 +19,9 @@ BR2_ROOTFS_USERS_TABLES="$(BR2_EXTERNAL_MINIKUBE_PATH)/board/coreos/minikube/use
BR2_ROOTFS_OVERLAY="$(BR2_EXTERNAL_MINIKUBE_PATH)/board/coreos/minikube/rootfs-overlay"
BR2_GLOBAL_PATCH_DIR="$(BR2_EXTERNAL_MINIKUBE_PATH)/board/coreos/minikube/patches"
BR2_LINUX_KERNEL=y
BR2_LINUX_KERNEL_LATEST_VERSION=n
BR2_LINUX_KERNEL_CUSTOM_VERSION=y
BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE="4.19.114"
BR2_LINUX_KERNEL_BZIMAGE=y
BR2_LINUX_KERNEL_LZ4=y
BR2_LINUX_KERNEL_USE_CUSTOM_CONFIG=y

@@ -36,7 +39,6 @@ BR2_PACKAGE_SSHFS=y
BR2_PACKAGE_XFSPROGS=y
BR2_PACKAGE_PARTED=y
BR2_PACKAGE_CA_CERTIFICATES=y
BR2_PACKAGE_CURL=y
BR2_PACKAGE_BRIDGE_UTILS=y
BR2_PACKAGE_EBTABLES=y
BR2_PACKAGE_ETHTOOL=y

@@ -53,7 +55,6 @@ BR2_PACKAGE_LIBCURL_CURL=y
BR2_PACKAGE_LIBOPENSSL=y
BR2_PACKAGE_LIBOPENSSL_BIN=y
BR2_PACKAGE_OPENVMTOOLS=y
BR2_PACKAGE_OPENVMTOOLS_PROCPS=y
BR2_PACKAGE_SYSTEMD_LOGIND=y
BR2_PACKAGE_SYSTEMD_MACHINED=y
BR2_PACKAGE_SYSTEMD_VCONSOLE=y
@@ -2,4 +2,4 @@ config BR2_PACKAGE_CNI_PLUGINS
bool "cni-plugins"
default y
depends on BR2_x86_64
depends on BR2_PACKAGE_HOST_GO_ARCH_SUPPORTS
depends on BR2_PACKAGE_HOST_GO_TARGET_ARCH_SUPPORTS

@@ -13,6 +13,7 @@ CNI_PLUGINS_LICENSE_FILES = LICENSE
CNI_PLUGINS_DEPENDENCIES = host-go

CNI_PLUGINS_MAKE_ENV = \
$(GO_TARGET_ENV) \
CGO_ENABLED=0 \
GO111MODULE=off

@@ -2,4 +2,4 @@ config BR2_PACKAGE_CNI
bool "cni"
default y
depends on BR2_x86_64
depends on BR2_PACKAGE_HOST_GO_ARCH_SUPPORTS
depends on BR2_PACKAGE_HOST_GO_TARGET_ARCH_SUPPORTS

@@ -14,6 +14,7 @@ CNI_DEPENDENCIES = host-go

CNI_GOPATH = $(@D)/_output
CNI_MAKE_ENV = \
$(GO_TARGET_ENV) \
CGO_ENABLED=0 \
GO111MODULE=off \
GOPATH="$(CNI_GOPATH)" \
@@ -1,7 +1,7 @@
config BR2_PACKAGE_CONMON
bool "conmon"
depends on BR2_PACKAGE_HOST_GO_ARCH_SUPPORTS
depends on BR2_PACKAGE_HOST_GO_CGO_LINKING_SUPPORTS
depends on BR2_PACKAGE_HOST_GO_TARGET_ARCH_SUPPORTS
depends on BR2_PACKAGE_HOST_GO_TARGET_CGO_LINKING_SUPPORTS
depends on BR2_TOOLCHAIN_HAS_THREADS
select BR2_PACKAGE_LIBGLIB2
select BR2_PACKAGE_SYSTEMD
@@ -2,8 +2,8 @@ config BR2_PACKAGE_CONTAINERD_BIN
bool "containerd-bin"
default y
depends on BR2_x86_64
depends on BR2_PACKAGE_HOST_GO_ARCH_SUPPORTS
depends on BR2_PACKAGE_HOST_GO_CGO_LINKING_SUPPORTS
depends on BR2_PACKAGE_HOST_GO_TARGET_ARCH_SUPPORTS
depends on BR2_PACKAGE_HOST_GO_TARGET_CGO_LINKING_SUPPORTS
depends on BR2_TOOLCHAIN_HAS_THREADS
depends on BR2_USE_MMU # lvm2
depends on !BR2_STATIC_LIBS # lvm2

@@ -10,6 +10,7 @@ CONTAINERD_BIN_SOURCE = $(CONTAINERD_BIN_VERSION).tar.gz
CONTAINERD_BIN_DEPENDENCIES = host-go libgpgme
CONTAINERD_BIN_GOPATH = $(@D)/_output
CONTAINERD_BIN_ENV = \
$(GO_TARGET_ENV) \
CGO_ENABLED=1 \
GO111MODULE=off \
GOPATH="$(CONTAINERD_BIN_GOPATH)" \
@@ -2,8 +2,8 @@ config BR2_PACKAGE_CRIO_BIN
bool "crio-bin"
default y
depends on BR2_x86_64
depends on BR2_PACKAGE_HOST_GO_ARCH_SUPPORTS
depends on BR2_PACKAGE_HOST_GO_CGO_LINKING_SUPPORTS
depends on BR2_PACKAGE_HOST_GO_TARGET_ARCH_SUPPORTS
depends on BR2_PACKAGE_HOST_GO_TARGET_CGO_LINKING_SUPPORTS
depends on BR2_TOOLCHAIN_HAS_THREADS
depends on BR2_USE_MMU # lvm2
depends on !BR2_STATIC_LIBS # lvm2

@@ -11,6 +11,7 @@ CRIO_BIN_SOURCE = $(CRIO_BIN_VERSION).tar.gz
CRIO_BIN_DEPENDENCIES = host-go libgpgme
CRIO_BIN_GOPATH = $(@D)/_output
CRIO_BIN_ENV = \
$(GO_TARGET_ENV) \
CGO_ENABLED=1 \
GO111MODULE=off \
GOPATH="$(CRIO_BIN_GOPATH)" \
@@ -2,8 +2,8 @@ config BR2_PACKAGE_PODMAN
bool "podman"
default y
depends on BR2_x86_64
depends on BR2_PACKAGE_HOST_GO_ARCH_SUPPORTS
depends on BR2_PACKAGE_HOST_GO_CGO_LINKING_SUPPORTS
depends on BR2_PACKAGE_HOST_GO_TARGET_ARCH_SUPPORTS
depends on BR2_PACKAGE_HOST_GO_TARGET_CGO_LINKING_SUPPORTS
depends on BR2_TOOLCHAIN_HAS_THREADS
select BR2_PACKAGE_RUNC_MASTER
select BR2_PACKAGE_CONMON

@@ -10,6 +10,7 @@ PODMAN_DEPENDENCIES = host-go

PODMAN_GOPATH = $(@D)/_output
PODMAN_BIN_ENV = \
$(GO_TARGET_ENV) \
CGO_ENABLED=1 \
GOPATH="$(PODMAN_GOPATH)" \
GOBIN="$(PODMAN_GOPATH)/bin" \
@@ -1,7 +1,7 @@
config BR2_PACKAGE_RUNC_MASTER
bool "runc-master"
depends on BR2_PACKAGE_HOST_GO_ARCH_SUPPORTS
depends on BR2_PACKAGE_HOST_GO_CGO_LINKING_SUPPORTS
depends on BR2_PACKAGE_HOST_GO_TARGET_ARCH_SUPPORTS
depends on BR2_PACKAGE_HOST_GO_TARGET_CGO_LINKING_SUPPORTS
depends on BR2_TOOLCHAIN_HAS_THREADS
help
runC is a CLI tool for spawning and running containers

@@ -12,6 +12,6 @@ config BR2_PACKAGE_RUNC_MASTER
https://github.com/opencontainers/runc

comment "runc needs a toolchain w/ threads"
depends on BR2_PACKAGE_HOST_GO_ARCH_SUPPORTS && \
BR2_PACKAGE_HOST_GO_CGO_LINKING_SUPPORTS
depends on BR2_PACKAGE_HOST_GO_TARGET_ARCH_SUPPORTS && \
BR2_PACKAGE_HOST_GO_TARGET_CGO_LINKING_SUPPORTS
depends on !BR2_TOOLCHAIN_HAS_THREADS

@@ -14,7 +14,8 @@ RUNC_MASTER_LICENSE_FILES = LICENSE
RUNC_MASTER_DEPENDENCIES = host-go

RUNC_MASTER_GOPATH = $(@D)/_output
RUNC_MASTER_MAKE_ENV = $(HOST_GO_TARGET_ENV) \
RUNC_MASTER_MAKE_ENV = \
$(GO_TARGET_ENV) \
CGO_ENABLED=1 \
GO111MODULE=off \
GOPATH="$(RUNC_MASTER_GOPATH)" \
@@ -0,0 +1,4 @@
# Turn on Source Address Verification in all interfaces to
# prevent some spoofing attacks.
net.ipv4.conf.default.rp_filter=1
net.ipv4.conf.all.rp_filter=1
@ -1,44 +1,125 @@
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# kind node base image
#
# For systemd + docker configuration used below, see the following references:
# https://www.freedesktop.org/wiki/Software/systemd/ContainerInterface/

# start from ubuntu 20.04, this image is reasonably small as a starting point
# for a kubernetes node image, it doesn't contain much we don't need
FROM ubuntu:focal-20200423

# copy in static files (configs, scripts)
COPY 10-network-security.conf /etc/sysctl.d/10-network-security.conf
COPY clean-install /usr/local/bin/clean-install
COPY entrypoint /usr/local/bin/entrypoint

# Install dependencies, first from apt, then from release tarballs.
# NOTE: we use one RUN to minimize layers.
#
# First we must ensure that our util scripts are executable.
#
# The base image already has: ssh, apt, snapd, but we need to install more packages.
# Packages installed are broken down into (each on a line):
# - packages needed to run services (systemd)
# - packages needed for kubernetes components
# - packages needed by the container runtime
# - misc packages kind uses itself
# After installing packages we cleanup by:
# - removing unwanted systemd services
# - disabling kmsg in journald (these log entries would be confusing)
#
# Next we ensure the /etc/kubernetes/manifests directory exists. Normally
# a kubeadm debian / rpm package would ensure that this exists but we install
# freshly built binaries directly when we build the node image.
#
# Finally we adjust tempfiles cleanup to be 1 minute after "boot" instead of 15m
# This is plenty after we've done initial setup for a node, but before we are
# likely to try to export logs etc.
RUN echo "Ensuring scripts are executable ..." \
    && chmod +x /usr/local/bin/clean-install /usr/local/bin/entrypoint \
    && echo "Installing Packages ..." \
    && DEBIAN_FRONTEND=noninteractive clean-install \
        systemd \
        conntrack iptables iproute2 ethtool socat util-linux mount ebtables udev kmod \
        libseccomp2 \
        bash ca-certificates curl rsync \
    && find /lib/systemd/system/sysinit.target.wants/ -name "systemd-tmpfiles-setup.service" -delete \
    && rm -f /lib/systemd/system/multi-user.target.wants/* \
    && rm -f /etc/systemd/system/*.wants/* \
    && rm -f /lib/systemd/system/local-fs.target.wants/* \
    && rm -f /lib/systemd/system/sockets.target.wants/*udev* \
    && rm -f /lib/systemd/system/sockets.target.wants/*initctl* \
    && rm -f /lib/systemd/system/basic.target.wants/* \
    && echo "ReadKMsg=no" >> /etc/systemd/journald.conf \
    && ln -s "$(which systemd)" /sbin/init \
    && echo "Ensuring /etc/kubernetes/manifests" \
    && mkdir -p /etc/kubernetes/manifests \
    && echo "Adjusting systemd-tmpfiles timer" \
    && sed -i /usr/lib/systemd/system/systemd-tmpfiles-clean.timer -e 's#OnBootSec=.*#OnBootSec=1min#' \
    && echo "Modifying /etc/nsswitch.conf to prefer hosts" \
    && sed -i /etc/nsswitch.conf -re 's#^(hosts:\s*).*#\1dns files#'

# tell systemd that it is in docker (it will check for the container env)
# https://www.freedesktop.org/wiki/Software/systemd/ContainerInterface/
ENV container docker
# systemd exits on SIGRTMIN+3, not SIGTERM (which re-executes it)
# https://bugzilla.redhat.com/show_bug.cgi?id=1201657
STOPSIGNAL SIGRTMIN+3
# NOTE: this is *only* for documentation, the entrypoint is overridden later
ENTRYPOINT [ "/usr/local/bin/entrypoint", "/sbin/init" ]

ARG COMMIT_SHA
# using base image created by kind https://github.com/kubernetes-sigs/kind/blob/v0.8.1/images/base/Dockerfile
# using base image created by kind https://github.com/kubernetes-sigs/kind/blob/2c0eee40/images/base/Dockerfile
# which is an ubuntu 20.04 with an entry-point that helps running systemd
# could be changed to any debian that can run systemd
FROM kindest/base:v20200430-2c0eee40 as base
USER root
# specify version of everything explicitly using 'apt-cache policy'
RUN apt-get update && apt-get install -y --no-install-recommends \

# install system requirements from the regular distro repositories
RUN clean-install \
    lz4 \
    gnupg \
    sudo \
    docker.io \
    containerd \
    openssh-server \
    dnsutils \
    runc \
    # libglib2.0-0 is required for conmon, which is required for podman
    libglib2.0-0 \
    # removing kind's crictl config
    && rm /etc/crictl.yaml
    libglib2.0-0

# Install cri-o/podman dependencies:
RUN sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list" && \
    curl -LO https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/xUbuntu_20.04/Release.key && \
    apt-key add - < Release.key && apt-get update && \
    apt-get install -y --no-install-recommends containers-common catatonit conmon containernetworking-plugins podman-plugins varlink
    apt-key add - < Release.key && \
    clean-install containers-common catatonit conmon containernetworking-plugins cri-tools podman-plugins varlink

# install cri-o based on https://github.com/cri-o/cri-o/commit/96b0c34b31a9fc181e46d7d8e34fb8ee6c4dc4e1#diff-04c6e90faac2675aa89e2176d2eec7d8R128
RUN sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/1.18:/1.18.3/xUbuntu_20.04/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list" && \
    curl -LO https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/1.18:/1.18.3/xUbuntu_20.04/Release.key && \
    apt-key add - < Release.key && apt-get update && \
    apt-get install -y --no-install-recommends cri-o=1.18.3~3
    apt-key add - < Release.key && \
    clean-install cri-o=1.18.3~3

# install podman
RUN sh -c "echo 'deb https://dl.bintray.com/afbjorklund/podman focal main' > /etc/apt/sources.list.d/podman.list" && \
    curl -L https://bintray.com/user/downloadSubjectPublicKey?username=afbjorklund -o afbjorklund-public.key.asc && \
    apt-key add - < afbjorklund-public.key.asc && apt-get update && \
    apt-get install -y --no-install-recommends podman=1.9.3~1
    apt-key add - < afbjorklund-public.key.asc && \
    clean-install podman=1.9.3~1

RUN mkdir -p /usr/lib/cri-o-runc/sbin && cp /usr/local/sbin/runc /usr/lib/cri-o-runc/sbin/runc
RUN mkdir -p /usr/lib/cri-o-runc/sbin && cp /usr/sbin/runc /usr/lib/cri-o-runc/sbin/runc

COPY entrypoint /usr/local/bin/entrypoint
# automount service
COPY automount/minikube-automount /usr/sbin/minikube-automount
COPY automount/minikube-automount.service /usr/lib/systemd/system/minikube-automount.service
@ -71,12 +152,7 @@ USER root
# https://github.com/kubernetes-sigs/kind/blob/master/images/base/files/usr/local/bin/entrypoint
RUN mkdir -p /kind
# Deleting leftovers
RUN apt-get clean -y && rm -rf \
    /var/cache/debconf/* \
    /var/lib/apt/lists/* \
    /var/log/* \
    /tmp/* \
    /var/tmp/* \
RUN rm -rf \
    /usr/share/doc/* \
    /usr/share/man/* \
    /usr/share/local/* \
@ -0,0 +1,39 @@
#!/bin/sh

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A script encapsulating a common Docker image pattern for installing packages
# and then cleaning up the unnecessary install artifacts.
# e.g. clean-install iptables ebtables conntrack

set -o errexit

if [ $# = 0 ]; then
  echo >&2 "No packages specified"
  exit 1
fi

apt-get update
apt-get install -y --no-install-recommends "$@"
apt-get clean -y
rm -rf \
   /var/cache/debconf/* \
   /var/lib/apt/lists/* \
   /var/log/* \
   /tmp/* \
   /var/tmp/* \
   /usr/share/doc/* \
   /usr/share/man/* \
   /usr/share/local/*
@ -1,4 +1,12 @@
[
  {
    "name": "v1.14.0",
    "checksums": {
      "darwin": "71dee6241a93945b40ea7188ad15459e50e7b65eab09fed7302d8cacdc58585c",
      "linux": "8727635489be895d9b9cfaa5cb599f45799a28fb07e0a2aac351a9aa1c4b46c1",
      "windows": "0317e6c338da23ccf0aba698668c6d919f22e1482340a09d1269220063937aeb"
    }
  },
  {
    "name": "v1.13.1",
    "checksums": {
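Each entry in the releases list above pairs a version tag with per-OS SHA256 checksums. A minimal Go sketch of a matching type, under the assumption that the file is a plain JSON array as shown (the type and field names here are illustrative, not taken from minikube):

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
)

// Release mirrors one entry of the releases file: a version name plus
// SHA256 checksums keyed by OS ("darwin", "linux", "windows").
type Release struct {
	Name      string            `json:"name"`
	Checksums map[string]string `json:"checksums"`
}

func main() {
	// "releases.json" is a placeholder path for this sketch
	blob, err := ioutil.ReadFile("releases.json")
	if err != nil {
		panic(err)
	}
	var releases []Release
	if err := json.Unmarshal(blob, &releases); err != nil {
		panic(err)
	}
	// the newest release is first in the list
	fmt.Println(releases[0].Name, releases[0].Checksums["linux"])
}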
go.mod
@ -25,7 +25,6 @@ require (
	github.com/evanphx/json-patch v4.5.0+incompatible // indirect
	github.com/go-ole/go-ole v1.2.4 // indirect
	github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3
	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
	github.com/google/go-cmp v0.4.1
	github.com/google/go-containerregistry v0.0.0-20200601195303-96cf69f03a3c
	github.com/google/go-github v17.0.0+incompatible

@ -86,6 +85,7 @@ require (
	k8s.io/api v0.17.4
	k8s.io/apimachinery v0.17.4
	k8s.io/client-go v0.17.4
	k8s.io/klog/v2 v2.3.0
	k8s.io/kubectl v0.0.0
	k8s.io/kubernetes v1.18.5
	sigs.k8s.io/sig-storage-lib-external-provisioner v4.0.0+incompatible // indirect
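This go.mod hunk adds k8s.io/klog/v2 alongside the removal of github.com/golang/glog; the remaining hunks in this diff swap call sites one-for-one, since both libraries expose the same Infof/Warningf/Errorf surface. A minimal sketch of the replacement pattern, assuming klog v2 as pinned above:

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	// register klog's flags on the default flagset (klog, unlike glog,
	// does not do this automatically), then route output to stderr
	klog.InitFlags(nil)
	if err := flag.Set("logtostderr", "true"); err != nil {
		panic(err)
	}
	flag.Parse()
	defer klog.Flush() // flush buffered entries before exiting

	klog.Infof("Got releases: %v", []string{"v1.19.0", "v1.18.5"})
}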
go.sum

@ -353,6 +353,8 @@ github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTD
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=

@ -1639,6 +1641,8 @@ k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUc
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.3.0 h1:WmkrnW7fdrm0/DMClc+HIxtftvxVIPAhlVwMQo5yLco=
k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/kube-aggregator v0.17.3/go.mod h1:1dMwMFQbmH76RKF0614L7dNenMl3dwnUJuOOyZ3GMXA=
k8s.io/kube-controller-manager v0.17.3/go.mod h1:22B/TsgVviuCVuNwUrqgyTi5D4AYjMFaK9c8h1oonkY=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU=
|
@ -42,7 +42,6 @@ var (
|
|||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
refs, err := extensionToBoilerplate(*boilerplatedir)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
|
|
|
@ -67,7 +67,7 @@ if ! [[ ${VERSION_BUILD} =~ ^[0-9]+$ ]]; then
|
|||
fi
|
||||
|
||||
#echo "Updating Docker images ..."
|
||||
#make push-gvisor-addon-image push-storage-provisioner-image
|
||||
#make push-gvisor-addon-image push-storage-provisioner-manifest
|
||||
|
||||
echo "Updating latest bucket for ${VERSION} release ..."
|
||||
gsutil cp -r "gs://${BUCKET}/releases/${TAGNAME}/*" "gs://${BUCKET}/releases/latest/"
|
||||
|
|
|
@ -81,7 +81,7 @@ for path in $(gsutil ls "gs://${ISO_BUCKET}/minikube-v${VERSION}*" || true); do
|
|||
done
|
||||
|
||||
# Upload all end-user assets other than preload files, as they are release independent
|
||||
for file in out/minikube[_-]* out/docker-machine-*; do
|
||||
for file in $( find out \( -name "minikube[_-]*" -or -name "docker-machine-*" \) -and ! -name "*latest*"); do
|
||||
n=0
|
||||
until [ $n -ge 5 ]
|
||||
do
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors All rights reserved.
|
||||
Copyright 2020 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
@ -14,159 +14,450 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
The script expects the following env variables:
|
||||
- UPDATE_TARGET=<string>: optional - if unset/absent, default option is "fs"; valid options are:
|
||||
- "fs" - update only local filesystem repo files [default]
|
||||
- "gh" - update only remote GitHub repo files and create PR (if one does not exist already)
|
||||
- "all" - update local and remote repo files and create PR (if one does not exist already)
|
||||
- GITHUB_TOKEN=<string>: The Github API access token. Injected by the Jenkins credential provider.
|
||||
- note: GITHUB_TOKEN is needed only if UPDATE_TARGET is "gh" or "all"
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"golang.org/x/oauth2"
|
||||
|
||||
"github.com/google/go-github/v32/github"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
func main() {
	// init glog: by default, all log statements write to files in a temporary directory, also
	// flag.Parse must be called before any logging is done
	flag.Parse()
	_ = flag.Set("logtostderr", "true")
const (
	// default context timeout
	cxTimeout = 300 * time.Second

	// fetch respective current stable (vDefault as DefaultKubernetesVersion) and
	// latest rc or beta (vDefault as NewestKubernetesVersion) Kubernetes GitHub Releases
	vDefault, vNewest, err := fetchKubernetesReleases()
	if err != nil {
		glog.Errorf("Fetching current GitHub Releases failed: %v", err)
	}
	if vDefault == "" || vNewest == "" {
		glog.Fatalf("Cannot determine current 'DefaultKubernetesVersion' and 'NewestKubernetesVersion'")
	}
	glog.Infof("Current Kubernetes GitHub Releases: 'stable' is %s and 'latest' is %s", vDefault, vNewest)

	if err := updateKubernetesVersions(vDefault, vNewest); err != nil {
		glog.Fatalf("Updating 'DefaultKubernetesVersion' and 'NewestKubernetesVersion' failed: %v", err)
	}
	glog.Infof("Update successful: 'DefaultKubernetesVersion' was set to %s and 'NewestKubernetesVersion' was set to %s", vDefault, vNewest)

	// Flush before exiting to guarantee all log output is written
	glog.Flush()
}

// fetchKubernetesReleases returns respective current stable (as vDefault) and
// latest rc or beta (as vNewest) Kubernetes GitHub Releases, and any error
func fetchKubernetesReleases() (vDefault, vNewest string, err error) {
	client := github.NewClient(nil)

	// set a context with a deadline - timeout after at most 10 seconds
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// walk through the paginated list of all 'kubernetes/kubernetes' repo releases
	// from latest to older releases, until latest release and pre-release are found
	// use max value (100) for PerPage to avoid hitting the rate limits (60 per hour, 10 per minute)
	// see https://godoc.org/github.com/google/go-github/github#hdr-Rate_Limiting
	opt := &github.ListOptions{PerPage: 100}
	for {
		rels, resp, err := client.Repositories.ListReleases(ctx, "kubernetes", "kubernetes", opt)
	ghListOptionsPerPage = 100
)

var (
	// root directory of the local filesystem repo to update
	fsRoot = "../../"

	// map key corresponds to GitHub TreeEntry.Path and local repo file path (prefixed with fsRoot)
	plan = map[string]Patch{
		"pkg/minikube/constants/constants.go": {
			Replace: map[string]string{
				`DefaultKubernetesVersion = \".*`: `DefaultKubernetesVersion = "{{.K8sStableVersion}}"`,
				`NewestKubernetesVersion = \".*`:  `NewestKubernetesVersion = "{{.K8sLatestVersion}}"`,
			},
		},
		"site/content/en/docs/commands/start.md": {
			Replace: map[string]string{
				`'stable' for .*,`:  `'stable' for {{.K8sStableVersion}},`,
				`'latest' for .*\)`: `'latest' for {{.K8sLatestVersion}})`,
			},
		},
	}

	target = os.Getenv("UPDATE_TARGET")

	// GitHub repo data
	ghToken = os.Getenv("GITHUB_TOKEN")
	ghOwner = "kubernetes"
	ghRepo  = "minikube"
	ghBase  = "master" // could be "main" in the future?

	// PR data
	prBranchPrefix = "update-kubernetes-version_" // will be appended with first 7 characters of the PR commit SHA
	prTitle        = `update_kubernetes_version: {stable:"{{.K8sStableVersion}}", latest:"{{.K8sLatestVersion}}"}`
	prIssue        = 4392
	prSearchLimit  = 100 // limit the number of previous PRs searched for same prTitle to be <= N * ghListOptionsPerPage
)
// Data holds respective stable (release) and latest (pre-release) Kubernetes versions
type Data struct {
	K8sStableVersion string `json:"k8sStableVersion"`
	K8sLatestVersion string `json:"k8sLatestVersion"`
}

// Patch defines content where all occurrences of each replace map key should be swapped with its
// respective value. Replace map keys can use RegExp and values can use Golang Text Template
type Patch struct {
	Content []byte            `json:"-"`
	Replace map[string]string `json:"replace"`
}

// apply patch to content by replacing all occurrences of map's keys with their respective values
func (p *Patch) apply(data interface{}) (changed bool, err error) {
	if p.Content == nil || p.Replace == nil {
		return false, fmt.Errorf("nothing to patch")
	}
	org := string(p.Content)
	str := org
	for src, dst := range p.Replace {
		re := regexp.MustCompile(src)
		tmpl := template.Must(template.New("").Parse(dst))
		buf := new(bytes.Buffer)
		if err := tmpl.Execute(buf, data); err != nil {
			return false, err
		}
		str = re.ReplaceAllString(str, buf.String())
	}
	p.Content = []byte(str)

	return str != org, nil
}
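To make apply's semantics concrete: each Replace key is compiled as a regular expression and each value is rendered as a text/template against the given data before substitution. A self-contained sketch of the same mechanics against an inline sample (sample content and version are illustrative):

package main

import (
	"bytes"
	"fmt"
	"regexp"
	"text/template"
)

func main() {
	content := `DefaultKubernetesVersion = "v1.18.0"`
	// key is a regexp, value is a text/template rendered with the data below
	replace := map[string]string{
		`DefaultKubernetesVersion = \".*`: `DefaultKubernetesVersion = "{{.K8sStableVersion}}"`,
	}
	data := struct{ K8sStableVersion string }{"v1.19.2"}

	for src, dst := range replace {
		re := regexp.MustCompile(src)
		tmpl := template.Must(template.New("").Parse(dst))
		buf := new(bytes.Buffer)
		if err := tmpl.Execute(buf, data); err != nil {
			panic(err)
		}
		content = re.ReplaceAllString(content, buf.String())
	}
	fmt.Println(content) // DefaultKubernetesVersion = "v1.19.2"
}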
func main() {
	// write log statements to stderr instead of to files
	if err := flag.Set("logtostderr", "true"); err != nil {
		fmt.Printf("Error setting 'logtostderr' klog flag: %v", err)
	}
	flag.Parse()
	defer klog.Flush()

	if target == "" {
		target = "fs"
	} else if target != "fs" && target != "gh" && target != "all" {
		klog.Fatalf("Invalid UPDATE_TARGET option: '%s'; Valid options are: unset/absent (defaults to 'fs'), 'fs', 'gh', or 'all'", target)
	} else if (target == "gh" || target == "all") && ghToken == "" {
		klog.Fatalf("GITHUB_TOKEN is required if UPDATE_TARGET is 'gh' or 'all'")
	}

	// set a context with defined timeout
	ctx, cancel := context.WithTimeout(context.Background(), cxTimeout)
	defer cancel()

	// get Kubernetes versions from GitHub Releases
	stable, latest, err := ghReleases(ctx, "kubernetes", "kubernetes", ghToken)
	if err != nil || stable == "" || latest == "" {
		klog.Fatalf("Error getting Kubernetes versions: %v", err)
	}
	data := Data{K8sStableVersion: stable, K8sLatestVersion: latest}
	klog.Infof("Kubernetes versions: 'stable' is %s and 'latest' is %s", data.K8sStableVersion, data.K8sLatestVersion)

	klog.Infof("The Plan:\n%s", thePlan(plan, data))

	if target == "fs" || target == "all" {
		changed, err := fsUpdate(fsRoot, plan, data)
		if err != nil {
			return "", "", err
			klog.Errorf("Error updating local repo: %v", err)
		} else if !changed {
			klog.Infof("Local repo update skipped: nothing changed")
		} else {
			klog.Infof("Local repo updated")
		}
	}

		for _, r := range rels {
			// GetName returns the Name field if it's non-nil, zero value otherwise.
			ver := r.GetName()
			if ver == "" {
				continue
			}
	if target == "gh" || target == "all" {
		// update prTitle replacing template placeholders with concrete data values
		tmpl := template.Must(template.New("prTitle").Parse(prTitle))
		buf := new(bytes.Buffer)
		if err := tmpl.Execute(buf, data); err != nil {
			klog.Fatalf("Error parsing PR Title: %v", err)
		}
		prTitle = buf.String()

			rel := strings.Split(ver, "-")
			// check if it is a release channel (ie, 'v1.19.2') or a
			// pre-release channel (ie, 'v1.19.3-rc.0' or 'v1.19.0-beta.2')
			if len(rel) == 1 && vDefault == "" {
				vDefault = ver
			} else if len(rel) > 1 && vNewest == "" {
				if strings.HasPrefix(rel[1], "rc") || strings.HasPrefix(rel[1], "beta") {
					vNewest = ver
				}
			}

			if vDefault != "" && vNewest != "" {
				// make sure that vNewest >= vDefault
				if vNewest < vDefault {
					vNewest = vDefault
				}
				return vDefault, vNewest, nil
		// check if PR already exists
		prURL, err := ghFindPR(ctx, prTitle, ghOwner, ghRepo, ghBase, ghToken)
		if err != nil {
			klog.Errorf("Error checking if PR already exists: %v", err)
		} else if prURL != "" {
			klog.Infof("PR create skipped: already exists (%s)", prURL)
		} else {
			// create PR
			pr, err := ghCreatePR(ctx, ghOwner, ghRepo, ghBase, prBranchPrefix, prTitle, prIssue, ghToken, plan, data)
			if err != nil {
				klog.Fatalf("Error creating PR: %v", err)
			} else if pr == nil {
				klog.Infof("PR create skipped: nothing changed")
			} else {
				klog.Infof("PR created: %s", *pr.HTMLURL)
			}
		}
	}
}
// fsUpdate updates local filesystem repo files according to the given plan and data,
// returns if the update actually changed anything, and any error occurred
func fsUpdate(fsRoot string, plan map[string]Patch, data Data) (changed bool, err error) {
	for path, p := range plan {
		path = filepath.Join(fsRoot, path)
		blob, err := ioutil.ReadFile(path)
		if err != nil {
			return false, err
		}
		info, err := os.Stat(path)
		if err != nil {
			return false, err
		}
		mode := info.Mode()

		p.Content = blob
		chg, err := p.apply(data)
		if err != nil {
			return false, err
		}
		if chg {
			changed = true
		}
		if err := ioutil.WriteFile(path, p.Content, mode); err != nil {
			return false, err
		}
	}
	return changed, nil
}
// ghCreatePR returns PR created in the GitHub owner/repo, applying the changes to the base head
// commit fork, as defined by the plan and data, and also returns any error occurred
// PR branch will be named by the branch, suffixed by '_' and first 7 characters of fork commit SHA
// PR itself will be named by the title and will reference the issue
func ghCreatePR(ctx context.Context, owner, repo, base, branch, title string, issue int, token string, plan map[string]Patch, data Data) (*github.PullRequest, error) {
	ghc := ghClient(ctx, token)

	// get base branch
	baseBranch, _, err := ghc.Repositories.GetBranch(ctx, owner, repo, base)
	if err != nil {
		return nil, fmt.Errorf("error getting base branch: %w", err)
	}

	// get base commit
	baseCommit, _, err := ghc.Repositories.GetCommit(ctx, owner, repo, *baseBranch.Commit.SHA)
	if err != nil {
		return nil, fmt.Errorf("error getting base commit: %w", err)
	}

	// get base tree
	baseTree, _, err := ghc.Git.GetTree(ctx, owner, repo, baseCommit.GetSHA(), true)
	if err != nil {
		return nil, fmt.Errorf("error getting base tree: %w", err)
	}

	// update files
	changes, err := ghUpdate(ctx, owner, repo, baseTree, token, plan, data)
	if err != nil {
		return nil, fmt.Errorf("error updating files: %w", err)
	}
	if changes == nil {
		return nil, nil
	}

	// create fork
	fork, resp, err := ghc.Repositories.CreateFork(ctx, owner, repo, nil)
	// https://pkg.go.dev/github.com/google/go-github/v32@v32.1.0/github#RepositoriesService.CreateFork
	// This method might return an *AcceptedError and a status code of 202. This is because this is
	// the status that GitHub returns to signify that it is now computing creating the fork in a
	// background task. In this event, the Repository value will be returned, which includes the
	// details about the pending fork. A follow up request, after a delay of a second or so, should
	// result in a successful request.
	if resp.StatusCode == 202 { // *AcceptedError
		time.Sleep(time.Second * 5)
	} else if err != nil {
		return nil, fmt.Errorf("error creating fork: %w", err)
	}

	// create fork tree from base and changed files
	forkTree, _, err := ghc.Git.CreateTree(ctx, *fork.Owner.Login, *fork.Name, *baseTree.SHA, changes)
	if err != nil {
		return nil, fmt.Errorf("error creating fork tree: %w", err)
	}

	// create fork commit
	forkCommit, _, err := ghc.Git.CreateCommit(ctx, *fork.Owner.Login, *fork.Name, &github.Commit{
		Message: github.String(title),
		Tree:    &github.Tree{SHA: forkTree.SHA},
		Parents: []*github.Commit{{SHA: baseCommit.SHA}},
	})
	if err != nil {
		return nil, fmt.Errorf("error creating fork commit: %w", err)
	}
	klog.Infof("PR commit '%s' created: %s", forkCommit.GetSHA(), forkCommit.GetHTMLURL())

	// create PR branch
	prBranch := branch + forkCommit.GetSHA()[:7]
	prRef, _, err := ghc.Git.CreateRef(ctx, *fork.Owner.Login, *fork.Name, &github.Reference{
		Ref: github.String("refs/heads/" + prBranch),
		Object: &github.GitObject{
			Type: github.String("commit"),
			SHA:  forkCommit.SHA,
		},
	})
	if err != nil {
		return nil, fmt.Errorf("error creating PR branch: %w", err)
	}
	klog.Infof("PR branch '%s' created: %s", prBranch, prRef.GetURL())

	// create PR
	modifiable := true
	pr, _, err := ghc.PullRequests.Create(ctx, owner, repo, &github.NewPullRequest{
		Title:               github.String(title),
		Head:                github.String(*fork.Owner.Login + ":" + prBranch),
		Base:                github.String(base),
		Body:                github.String(fmt.Sprintf("fixes #%d\n\nAutomatically created PR to update repo according to the Plan:\n\n```\n%s\n```", issue, thePlan(plan, data))),
		MaintainerCanModify: &modifiable,
	})
	if err != nil {
		return nil, fmt.Errorf("error creating pull request: %w", err)
	}
	return pr, nil
}
// ghUpdate updates remote GitHub owner/repo tree according to the given token, plan and data,
// returns resulting changes, and any error occurred
func ghUpdate(ctx context.Context, owner, repo string, tree *github.Tree, token string, plan map[string]Patch, data Data) (changes []*github.TreeEntry, err error) {
	ghc := ghClient(ctx, token)

	// load each plan's path content and update it creating new GitHub TreeEntries
	cnt := len(plan) // expected number of files to change
	for _, org := range tree.Entries {
		if *org.Type == "blob" {
			if patch, match := plan[*org.Path]; match {
				blob, _, err := ghc.Git.GetBlobRaw(ctx, owner, repo, *org.SHA)
				if err != nil {
					return nil, fmt.Errorf("error getting file: %w", err)
				}
				patch.Content = blob
				changed, err := patch.apply(data)
				if err != nil {
					return nil, fmt.Errorf("error patching file: %w", err)
				}
				if changed {
					// add github.TreeEntry that will replace original path content with patched one
					changes = append(changes, &github.TreeEntry{
						Path:    org.Path,
						Mode:    org.Mode,
						Type:    org.Type,
						Content: github.String(string(patch.Content)),
					})
				}
				if cnt--; cnt == 0 {
					break
				}
			}
		}
	}
	if cnt != 0 {
		return nil, fmt.Errorf("error finding all the files (%d missing) - check the Plan: %w", cnt, err)
	}
	return changes, nil
}
// ghFindPR returns URL of the PR if found in the given GitHub owner/repo base and any error occurred
func ghFindPR(ctx context.Context, title, owner, repo, base, token string) (url string, err error) {
	ghc := ghClient(ctx, token)

	// walk through the paginated list of all pull requests, from latest to older releases
	opts := &github.PullRequestListOptions{State: "all", Base: base, ListOptions: github.ListOptions{PerPage: ghListOptionsPerPage}}
	for (opts.Page+1)*ghListOptionsPerPage <= prSearchLimit {
		prs, resp, err := ghc.PullRequests.List(ctx, owner, repo, opts)
		if err != nil {
			return "", err
		}
		for _, pr := range prs {
			if pr.GetTitle() == title {
				return pr.GetHTMLURL(), nil
			}
		}
		if resp.NextPage == 0 {
			break
		}
		opt.Page = resp.NextPage
		opts.Page = resp.NextPage
	}
	return vDefault, vNewest, nil
	return "", nil
}
// updateKubernetesVersions updates DefaultKubernetesVersion to vDefault release and
// NewestKubernetesVersion to vNewest release, and returns any error
func updateKubernetesVersions(vDefault, vNewest string) error {
	if err := replaceAllString("../../pkg/minikube/constants/constants.go", map[string]string{
		`DefaultKubernetesVersion = \".*`: "DefaultKubernetesVersion = \"" + vDefault + "\"",
		`NewestKubernetesVersion = \".*`:  "NewestKubernetesVersion = \"" + vNewest + "\"",
	}); err != nil {
		return err
	}
// ghReleases returns current stable release and latest rc or beta pre-release
// from GitHub owner/repo repository, and any error;
// if latest pre-release version is lower than current stable release, then it
// will return current stable release for both
func ghReleases(ctx context.Context, owner, repo, token string) (stable, latest string, err error) {
	ghc := ghClient(ctx, token)

	if err := replaceAllString("../../site/content/en/docs/commands/start.md", map[string]string{
		`'stable' for .*,`:  "'stable' for " + vDefault + ",",
		`'latest' for .*\)`: "'latest' for " + vNewest + ")",
	}); err != nil {
		return err
	}

	// update testData just for the latest 'v<MAJOR>.<MINOR>.0' from vDefault
	vDefaultMM := vDefault[:strings.LastIndex(vDefault, ".")]
	testData := "../../pkg/minikube/bootstrapper/bsutil/testdata/" + vDefaultMM

	return filepath.Walk(testData, func(path string, info os.FileInfo, err error) error {
	// walk through the paginated list of all owner/repo releases, from newest to oldest
	opts := &github.ListOptions{PerPage: ghListOptionsPerPage}
	for {
		rls, resp, err := ghc.Repositories.ListReleases(ctx, owner, repo, opts)
		if err != nil {
			return err
			return "", "", err
		}
		if !strings.HasSuffix(path, "default.yaml") {
			return nil
		for _, rl := range rls {
			ver := rl.GetName()
			if ver == "" {
				continue
			}
			// check if ver version is a release (ie, 'v1.19.2') or a
			// pre-release (ie, 'v1.19.3-rc.0' or 'v1.19.0-beta.2') channel ch
			// note: github.RepositoryRelease GetPrerelease() bool would be useful for all pre-rels
			ch := strings.Split(ver, "-")
			if len(ch) == 1 && stable == "" {
				stable = ver
			} else if len(ch) > 1 && latest == "" {
				if strings.HasPrefix(ch[1], "rc") || strings.HasPrefix(ch[1], "beta") {
					latest = ver
				}
			}
			if stable != "" && latest != "" {
				// make sure that v.Latest >= stable
				if latest < stable {
					latest = stable
				}
				return stable, latest, nil
			}
		}
		return replaceAllString(path, map[string]string{
			`kubernetesVersion: .*`: "kubernetesVersion: " + vDefaultMM + ".0",
		})
	})
		if resp.NextPage == 0 {
			break
		}
		opts.Page = resp.NextPage
	}
	return stable, latest, nil
}
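One caveat in ghReleases above: the guard "if latest < stable" compares version strings lexically, not semantically, so it is only a rough safety net that happens to work because releases are walked newest-first. A short sketch of the difference, using golang.org/x/mod/semver as one possible alternative (not what this script uses):

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	stable, latest := "v1.19.0", "v1.9.9"

	// lexical comparison, as in the string compare above: '9' > '1',
	// so "v1.9.9" sorts after "v1.19.0"
	fmt.Println(latest < stable) // false

	// semantic comparison: v1.9.9 is in fact older than v1.19.0
	fmt.Println(semver.Compare(latest, stable) < 0) // true
}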
// replaceAllString replaces all occurrences of map's keys with their respective values in the file
func replaceAllString(path string, pairs map[string]string) error {
	fb, err := ioutil.ReadFile(path)
	if err != nil {
		return err
// ghClient returns GitHub Client with a given context and optional token for authenticated requests
func ghClient(ctx context.Context, token string) *github.Client {
	if token == "" {
		return github.NewClient(nil)
	}

	info, err := os.Stat(path)
	if err != nil {
		return err
	}
	mode := info.Mode()

	f := string(fb)
	for org, new := range pairs {
		re := regexp.MustCompile(org)
		f = re.ReplaceAllString(f, new)
	}
	if err := ioutil.WriteFile(path, []byte(f), mode); err != nil {
		return err
	}

	return nil
	ts := oauth2.StaticTokenSource(
		&oauth2.Token{AccessToken: token},
	)
	tc := oauth2.NewClient(ctx, ts)
	return github.NewClient(tc)
}

// thePlan parses and returns updated plan replacing template placeholders with concrete data values
func thePlan(plan map[string]Patch, data Data) (prettyprint string) {
	for _, p := range plan {
		for src, dst := range p.Replace {
			tmpl := template.Must(template.New("").Parse(dst))
			buf := new(bytes.Buffer)
			if err := tmpl.Execute(buf, data); err != nil {
				klog.Fatalf("Error parsing the Plan: %v", err)
				return fmt.Sprintf("%+v", plan)
			}
			p.Replace[src] = buf.String()
		}
	}
	str, err := json.MarshalIndent(plan, "", "  ")
	if err != nil {
		klog.Fatalf("Error parsing the Plan: %v", err)
		return fmt.Sprintf("%+v", plan)
	}
	return string(str)
}
@ -39,6 +39,7 @@ import (

func generateTarball(kubernetesVersion, containerRuntime, tarballFilename string) error {
	driver := kic.NewDriver(kic.Config{
		ClusterName:       profile,
		KubernetesVersion: kubernetesVersion,
		ContainerRuntime:  containerRuntime,
		OCIBinary:         oci.Docker,
@ -19,8 +19,9 @@ package main
import (
	"context"

	"github.com/golang/glog"
	"github.com/google/go-github/github"

	"k8s.io/klog/v2"
)

// RecentK8sVersions returns the most recent k8s version, usually around 30

@ -35,6 +36,6 @@ func RecentK8sVersions() ([]string, error) {
	for _, r := range list {
		releases = append(releases, r.GetTagName())
	}
	glog.Infof("Got releases: %v", releases)
	klog.Infof("Got releases: %v", releases)
	return releases, nil
}
|
@ -1,6 +1,6 @@
|
|||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<svg width="702px" height="683px" viewBox="0 0 702 683" version="1.1" xmlns="https://www.w3.org/2000/svg" xmlns:xlink="https://www.w3.org/1999/xlink">
|
||||
<!-- Generator: Sketch 39.1 (31720) - https://www.sketchapp.com/ -->
|
||||
<svg width="702px" height="683px" viewBox="0 0 702 683" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
|
||||
<!-- Generator: Sketch 39.1 (31720) - http://www.bohemiancoding.com/sketch -->
|
||||
<title>minikube</title>
|
||||
<desc>Created with Sketch.</desc>
|
||||
<defs>
|
||||
|
|
@ -26,10 +26,10 @@ import (
	"sync"
	"time"

	"github.com/golang/glog"
	"github.com/pkg/errors"
	"github.com/spf13/viper"

	"k8s.io/klog/v2"
	"k8s.io/minikube/pkg/drivers/kic/oci"
	"k8s.io/minikube/pkg/kapi"
	"k8s.io/minikube/pkg/minikube/assets"

@ -52,7 +52,7 @@ const defaultStorageClassProvisioner = "standard"

// RunCallbacks runs all actions associated to an addon, but does not set it (thread-safe)
func RunCallbacks(cc *config.ClusterConfig, name string, value string) error {
	glog.Infof("Setting %s=%s in profile %q", name, value, cc.Name)
	klog.Infof("Setting %s=%s in profile %q", name, value, cc.Name)
	a, valid := isAddonValid(name)
	if !valid {
		return errors.Errorf("%s is not a valid addon", name)

@ -94,7 +94,7 @@ func SetAndSave(profile string, name string, value string) error {
		return errors.Wrap(err, "set")
	}

	glog.Infof("Writing out %q config to set %s=%v...", profile, name, value)
	klog.Infof("Writing out %q config to set %s=%v...", profile, name, value)
	return config.Write(profile, cc)
}

@ -128,7 +128,7 @@ func SetBool(cc *config.ClusterConfig, name string, val string) error {

// enableOrDisableAddon updates addon status executing any commands necessary
func enableOrDisableAddon(cc *config.ClusterConfig, name string, val string) error {
	glog.Infof("Setting addon %s=%s in %q", name, val, cc.Name)
	klog.Infof("Setting addon %s=%s in %q", name, val, cc.Name)
	enable, err := strconv.ParseBool(val)
	if err != nil {
		return errors.Wrapf(err, "parsing bool: %s", name)

@ -137,7 +137,7 @@ func enableOrDisableAddon(cc *config.ClusterConfig, name string, val string) err

	// check addon status before enabling/disabling it
	if isAddonAlreadySet(cc, addon, enable) {
		glog.Warningf("addon %s should already be in state %v", name, val)
		klog.Warningf("addon %s should already be in state %v", name, val)
		if !enable {
			return nil
		}

@ -185,7 +185,7 @@ https://github.com/kubernetes/minikube/issues/7332`, out.V{"driver_name": cc.Dri
	mName := driver.MachineName(*cc, cp)
	host, err := machine.LoadHost(api, mName)
	if err != nil || !machine.IsRunning(api, mName) {
		glog.Warningf("%q is not running, setting %s=%v and skipping enablement (err=%v)", mName, addon.Name(), enable, err)
		klog.Warningf("%q is not running, setting %s=%v and skipping enablement (err=%v)", mName, addon.Name(), enable, err)
		return nil
	}

@ -240,15 +240,15 @@ func enableOrDisableAddonInternal(cc *config.ClusterConfig, addon *assets.Addon,
		fPath := path.Join(f.GetTargetDir(), f.GetTargetName())

		if enable {
			glog.Infof("installing %s", fPath)
			klog.Infof("installing %s", fPath)
			if err := cmd.Copy(f); err != nil {
				return err
			}
		} else {
			glog.Infof("Removing %+v", fPath)
			klog.Infof("Removing %+v", fPath)
			defer func() {
				if err := cmd.Remove(f); err != nil {
					glog.Warningf("error removing %s; addon should still be disabled as expected", fPath)
					klog.Warningf("error removing %s; addon should still be disabled as expected", fPath)
				}
			}()
		}

@ -261,7 +261,7 @@ func enableOrDisableAddonInternal(cc *config.ClusterConfig, addon *assets.Addon,
	apply := func() error {
		_, err := cmd.RunCmd(kubectlCommand(cc, deployFiles, enable))
		if err != nil {
			glog.Warningf("apply failed, will retry: %v", err)
			klog.Warningf("apply failed, will retry: %v", err)
		}
		return err
	}

@ -271,7 +271,7 @@ func enableOrDisableAddonInternal(cc *config.ClusterConfig, addon *assets.Addon,

// enableOrDisableStorageClasses enables or disables storage classes
func enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val string) error {
	glog.Infof("enableOrDisableStorageClasses %s=%v on %q", name, val, cc.Name)
	klog.Infof("enableOrDisableStorageClasses %s=%v on %q", name, val, cc.Name)
	enable, err := strconv.ParseBool(val)
	if err != nil {
		return errors.Wrap(err, "Error parsing boolean")

@ -293,7 +293,7 @@ func enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val st
		return errors.Wrap(err, "getting control plane")
	}
	if !machine.IsRunning(api, driver.MachineName(*cc, cp)) {
		glog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement", driver.MachineName(*cc, cp), name, val)
		klog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement", driver.MachineName(*cc, cp), name, val)
		return enableOrDisableAddon(cc, name, val)
	}

@ -324,11 +324,22 @@ func verifyAddonStatus(cc *config.ClusterConfig, name string, val string) error
}

func verifyGCPAuthAddon(cc *config.ClusterConfig, name string, val string) error {
	return verifyAddonStatusInternal(cc, name, val, "gcp-auth")
	enable, err := strconv.ParseBool(val)
	if err != nil {
		return errors.Wrapf(err, "parsing bool: %s", name)
	}
	err = verifyAddonStatusInternal(cc, name, val, "gcp-auth")

	if enable && err == nil {
		out.T(style.Notice, "Your GCP credentials will now be mounted into every pod created in the {{.name}} cluster.", out.V{"name": cc.Name})
		out.T(style.Notice, "If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.")
	}

	return err
}

func verifyAddonStatusInternal(cc *config.ClusterConfig, name string, val string, ns string) error {
	glog.Infof("Verifying addon %s=%s in %q", name, val, cc.Name)
	klog.Infof("Verifying addon %s=%s in %q", name, val, cc.Name)
	enable, err := strconv.ParseBool(val)
	if err != nil {
		return errors.Wrapf(err, "parsing bool: %s", name)

@ -356,9 +367,9 @@ func Start(wg *sync.WaitGroup, cc *config.ClusterConfig, toEnable map[string]boo
	defer wg.Done()

	start := time.Now()
	glog.Infof("enableAddons start: toEnable=%v, additional=%s", toEnable, additional)
	klog.Infof("enableAddons start: toEnable=%v, additional=%s", toEnable, additional)
	defer func() {
		glog.Infof("enableAddons completed in %s", time.Since(start))
		klog.Infof("enableAddons completed in %s", time.Since(start))
	}()

	// Get the default values of any addons not saved to our config

@ -394,9 +405,11 @@ func Start(wg *sync.WaitGroup, cc *config.ClusterConfig, toEnable map[string]boo

	var awg sync.WaitGroup

	defer func() { // making it show after verifications( not perfect till #7613 is closed)
	enabledAddons := []string{}

	defer func() { // making it show after verifications (see #7613)
		register.Reg.SetStep(register.EnablingAddons)
		out.T(style.AddonEnable, "Enabled addons: {{.addons}}", out.V{"addons": strings.Join(toEnableList, ", ")})
		out.T(style.AddonEnable, "Enabled addons: {{.addons}}", out.V{"addons": strings.Join(enabledAddons, ", ")})
	}()
	for _, a := range toEnableList {
		awg.Add(1)

@ -404,6 +417,8 @@ func Start(wg *sync.WaitGroup, cc *config.ClusterConfig, toEnable map[string]boo
		err := RunCallbacks(cc, name, "true")
		if err != nil {
			out.WarningT("Enabling '{{.name}}' returned an error: {{.error}}", out.V{"name": name, "error": err})
		} else {
			enabledAddons = append(enabledAddons, name)
		}
		awg.Done()
	}(a)

@ -411,9 +426,10 @@ func Start(wg *sync.WaitGroup, cc *config.ClusterConfig, toEnable map[string]boo

	// Wait until all of the addons are enabled before updating the config (not thread safe)
	awg.Wait()
	for _, a := range toEnableList {

	for _, a := range enabledAddons {
		if err := Set(cc, a, "true"); err != nil {
			glog.Errorf("store failed: %v", err)
			klog.Errorf("store failed: %v", err)
		}
	}
}
@ -169,7 +169,7 @@ var Addons = []*Addon{
	{
		name: "gcp-auth",
		set:  SetBool,
		callbacks: []setFn{gcpauth.EnableOrDisable, enableOrDisableAddon, verifyGCPAuthAddon, gcpauth.DisplayAddonMessage},
		callbacks: []setFn{gcpauth.EnableOrDisable, enableOrDisableAddon, verifyGCPAuthAddon},
	},
	{
		name: "volumesnapshots",
@ -60,7 +60,7 @@ func enableAddon(cfg *config.ClusterConfig) error {
	ctx := context.Background()
	creds, err := google.FindDefaultCredentials(ctx)
	if err != nil {
		exit.Message(reason.InternalCredsNotFound, "Could not find any GCP credentials. Either run `gcloud auth login` or set the GOOGLE_APPLICATION_CREDENTIALS environment variable to the path of your credentials file.")
		exit.Message(reason.InternalCredsNotFound, "Could not find any GCP credentials. Either run `gcloud auth application-default login` or set the GOOGLE_APPLICATION_CREDENTIALS environment variable to the path of your credentials file.")
	}

	f := assets.NewMemoryAssetTarget(creds.JSON, credentialsPath, "0444")

@ -116,16 +116,3 @@ func disableAddon(cfg *config.ClusterConfig) error {

	return nil
}

// DisplayAddonMessage displays a gcp-auth addon-specific message to the user
func DisplayAddonMessage(cfg *config.ClusterConfig, name string, val string) error {
	enable, err := strconv.ParseBool(val)
	if err != nil {
		return errors.Wrapf(err, "parsing bool: %s", name)
	}
	if enable {
		out.T(style.Notice, "Your GCP credentials will now be mounted into every pod created in the {{.name}} cluster.", out.V{"name": cfg.Name})
		out.T(style.Notice, "If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.")
	}
	return nil
}
@ -27,8 +27,10 @@ import (
	"github.com/docker/machine/libmachine/mcnflag"
	"github.com/docker/machine/libmachine/mcnutils"
	"github.com/docker/machine/libmachine/ssh"
	"github.com/golang/glog"
	"github.com/pkg/errors"

	"k8s.io/klog/v2"
	"k8s.io/minikube/pkg/util"
)

// This file is for common code shared among internal machine drivers
@ -74,7 +76,7 @@ func createRawDiskImage(sshKeyPath, diskPath string, diskSizeMb int) error {
		return errors.Wrapf(err, "closing file %s", diskPath)
	}

	if err := os.Truncate(diskPath, int64(diskSizeMb*1000000)); err != nil {
	if err := os.Truncate(diskPath, util.ConvertMBToBytes(diskSizeMb)); err != nil {
		return errors.Wrap(err, "truncate")
	}
	return nil
@ -96,20 +98,20 @@ func Restart(d drivers.Driver) error {

// MakeDiskImage makes a boot2docker VM disk image.
func MakeDiskImage(d *drivers.BaseDriver, boot2dockerURL string, diskSize int) error {
	glog.Infof("Making disk image using store path: %s", d.StorePath)
	klog.Infof("Making disk image using store path: %s", d.StorePath)
	b2 := mcnutils.NewB2dUtils(d.StorePath)
	if err := b2.CopyIsoToMachineDir(boot2dockerURL, d.MachineName); err != nil {
		return errors.Wrap(err, "copy iso to machine dir")
	}

	keyPath := d.GetSSHKeyPath()
	glog.Infof("Creating ssh key: %s...", keyPath)
	klog.Infof("Creating ssh key: %s...", keyPath)
	if err := ssh.GenerateSSHKey(keyPath); err != nil {
		return errors.Wrap(err, "generate ssh key")
	}

	diskPath := GetDiskPath(d)
	glog.Infof("Creating raw disk image: %s...", diskPath)
	klog.Infof("Creating raw disk image: %s...", diskPath)
	if _, err := os.Stat(diskPath); os.IsNotExist(err) {
		if err := createRawDiskImage(publicSSHKeyPath(d), diskPath, diskSize); err != nil {
			return errors.Wrapf(err, "createRawDiskImage(%s)", diskPath)

@ -123,7 +125,7 @@ func MakeDiskImage(d *drivers.BaseDriver, boot2dockerURL string, diskSize int) e
}

func fixMachinePermissions(path string) error {
	glog.Infof("Fixing permissions on %s ...", path)
	klog.Infof("Fixing permissions on %s ...", path)
	if err := os.Chown(path, syscall.Getuid(), syscall.Getegid()); err != nil {
		return errors.Wrap(err, "chown dir")
	}
@ -36,7 +36,7 @@ func Test_createDiskImage(t *testing.T) {
	diskPath := filepath.Join(tmpdir, "disk")

	sizeInMb := 100
	sizeInBytes := int64(sizeInMb) * 1000000
	sizeInBytes := int64(104857600)
	if err := createRawDiskImage(sshPath, diskPath, sizeInMb); err != nil {
		t.Errorf("createDiskImage() error = %v", err)
	}
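The two disk-image hunks above move from a decimal calculation (diskSizeMb * 1000000) to util.ConvertMBToBytes, and the test's expectation changes to 104857600 bytes, i.e. 100 * 1024 * 1024. That implies the helper treats "MB" as binary mebibytes; a sketch under that assumption (ConvertMBToBytes' real definition lives in pkg/util and is not shown in this diff):

package main

import "fmt"

// convertMBToBytes mirrors what util.ConvertMBToBytes is assumed to do here:
// interpret "MB" as binary mebibytes (1 MB = 1024 * 1024 bytes).
func convertMBToBytes(mb int) int64 {
	return int64(mb) * 1024 * 1024
}

func main() {
	fmt.Println(convertMBToBytes(100)) // 104857600, matching the updated test
	fmt.Println(int64(100) * 1000000)  // 100000000, the old decimal calculation
}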
@ -28,8 +28,8 @@ import (
	"github.com/docker/machine/libmachine/drivers"
	"github.com/docker/machine/libmachine/ssh"
	"github.com/docker/machine/libmachine/state"
	"github.com/golang/glog"
	"github.com/pkg/errors"
	"k8s.io/klog/v2"
	pkgdrivers "k8s.io/minikube/pkg/drivers"
	"k8s.io/minikube/pkg/drivers/kic/oci"
	"k8s.io/minikube/pkg/minikube/assets"

@ -37,6 +37,8 @@ import (
	"k8s.io/minikube/pkg/minikube/constants"
	"k8s.io/minikube/pkg/minikube/cruntime"
	"k8s.io/minikube/pkg/minikube/download"
	"k8s.io/minikube/pkg/minikube/driver"
	"k8s.io/minikube/pkg/minikube/out"
	"k8s.io/minikube/pkg/minikube/sysinit"
	"k8s.io/minikube/pkg/util/retry"
)
@ -76,11 +78,22 @@ func (d *Driver) Create() error {
		CPUs:          strconv.Itoa(d.NodeConfig.CPU),
		Memory:        strconv.Itoa(d.NodeConfig.Memory) + "mb",
		Envs:          d.NodeConfig.Envs,
		ExtraArgs:     []string{"--expose", fmt.Sprintf("%d", d.NodeConfig.APIServerPort)},
		ExtraArgs:     append([]string{"--expose", fmt.Sprintf("%d", d.NodeConfig.APIServerPort)}, d.NodeConfig.ExtraArgs...),
		OCIBinary:     d.NodeConfig.OCIBinary,
		APIServerPort: d.NodeConfig.APIServerPort,
	}

	if gateway, err := oci.CreateNetwork(d.OCIBinary, d.NodeConfig.ClusterName); err != nil {
		out.WarningT("Unable to create dedicated network, this might result in cluster IP change after restart: {{.error}}", out.V{"error": err})
	} else {
		params.Network = d.NodeConfig.ClusterName
		ip := gateway.To4()
		// calculate the container IP based on guessing the machine index
		ip[3] += byte(driver.IndexFromMachineName(d.NodeConfig.MachineName))
		klog.Infof("calculated static IP %q for the %q container", ip.String(), d.NodeConfig.MachineName)
		params.IP = ip.String()
	}

	// control plane specific options
	params.PortMappings = append(params.PortMappings, oci.PortMapping{
		ListenAddress: oci.DefaultBindIPV4,
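The static-IP logic above takes the IPv4 gateway returned for the new network and adds the machine's index to the last octet. A self-contained sketch of that arithmetic; the gateway value and the index-from-name rule (a trailing "-m02"-style suffix, defaulting to 1) are assumptions for illustration, not minikube's exact implementation:

package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// indexFromMachineName sketches the assumed rule: "minikube" -> 1,
// "minikube-m02" -> 2, and so on.
func indexFromMachineName(name string) byte {
	parts := strings.Split(name, "-")
	last := parts[len(parts)-1]
	if strings.HasPrefix(last, "m") {
		if n, err := strconv.Atoi(last[1:]); err == nil {
			return byte(n)
		}
	}
	return 1
}

func main() {
	gateway := net.ParseIP("192.168.49.1") // e.g. what oci.CreateNetwork might return

	ip := gateway.To4()
	ip[3] += indexFromMachineName("minikube-m02")
	fmt.Println(ip.String()) // 192.168.49.3
}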
@ -102,14 +115,14 @@ func (d *Driver) Create() error {
|
|||
|
||||
exists, err := oci.ContainerExists(d.OCIBinary, params.Name, true)
|
||||
if err != nil {
|
||||
glog.Warningf("failed to check if container already exists: %v", err)
|
||||
klog.Warningf("failed to check if container already exists: %v", err)
|
||||
}
|
||||
if exists {
|
||||
// if container was created by minikube it is safe to delete and recreate it.
|
||||
if oci.IsCreatedByMinikube(d.OCIBinary, params.Name) {
|
||||
glog.Info("Found already existing abandoned minikube container, will try to delete.")
|
||||
klog.Info("Found already existing abandoned minikube container, will try to delete.")
|
||||
if err := oci.DeleteContainer(d.OCIBinary, params.Name); err != nil {
|
||||
glog.Errorf("Failed to delete a conflicting minikube container %s. You might need to restart your %s daemon and delete it manually and try again: %v", params.Name, params.OCIBinary, err)
|
||||
klog.Errorf("Failed to delete a conflicting minikube container %s. You might need to restart your %s daemon and delete it manually and try again: %v", params.Name, params.OCIBinary, err)
|
||||
}
|
||||
} else {
|
||||
// The conflicting container name was not created by minikube
|
||||
|
@ -124,6 +137,7 @@ func (d *Driver) Create() error {
|
|||
|
||||
var waitForPreload sync.WaitGroup
|
||||
waitForPreload.Add(1)
|
||||
var pErr error
|
||||
go func() {
|
||||
defer waitForPreload.Done()
|
||||
// If preload doesn't exist, don't bother extracting tarball to volume
|
||||
|
@ -131,14 +145,21 @@ func (d *Driver) Create() error {
|
|||
return
|
||||
}
|
||||
t := time.Now()
|
||||
glog.Infof("Starting extracting preloaded images to volume ...")
|
||||
klog.Infof("Starting extracting preloaded images to volume ...")
|
||||
// Extract preloaded images to container
|
||||
if err := oci.ExtractTarballToVolume(d.NodeConfig.OCIBinary, download.TarballPath(d.NodeConfig.KubernetesVersion, d.NodeConfig.ContainerRuntime), params.Name, d.NodeConfig.ImageDigest); err != nil {
|
||||
glog.Infof("Unable to extract preloaded tarball to volume: %v", err)
|
||||
if strings.Contains(err.Error(), "No space left on device") {
|
||||
pErr = oci.ErrInsufficientDockerStorage
|
||||
return
|
||||
}
|
||||
klog.Infof("Unable to extract preloaded tarball to volume: %v", err)
|
||||
} else {
|
||||
glog.Infof("duration metric: took %f seconds to extract preloaded images to volume", time.Since(t).Seconds())
|
||||
klog.Infof("duration metric: took %f seconds to extract preloaded images to volume", time.Since(t).Seconds())
|
||||
}
|
||||
}()
|
||||
if pErr == oci.ErrInsufficientDockerStorage {
|
||||
return pErr
|
||||
}
|
||||
|
||||
if err := oci.CreateContainerNode(params); err != nil {
|
||||
return errors.Wrap(err, "create kic node")
|
||||
|
@ -155,7 +176,7 @@ func (d *Driver) Create() error {
// prepareSSH will generate keys and copy to the container so minikube ssh works
func (d *Driver) prepareSSH() error {
keyPath := d.GetSSHKeyPath()
glog.Infof("Creating ssh key for kic: %s...", keyPath)
klog.Infof("Creating ssh key for kic: %s...", keyPath)
if err := ssh.GenerateSSHKey(keyPath); err != nil {
return errors.Wrap(err, "generate ssh key")
}
@ -255,11 +276,11 @@ func (d *Driver) Kill() error {
// on init this doesn't get filled when called from cmd
d.exec = command.NewKICRunner(d.MachineName, d.OCIBinary)
if err := sysinit.New(d.exec).ForceStop("kubelet"); err != nil {
glog.Warningf("couldn't force stop kubelet. will continue with kill anyways: %v", err)
klog.Warningf("couldn't force stop kubelet. will continue with kill anyways: %v", err)
}

if err := oci.ShutDown(d.OCIBinary, d.MachineName); err != nil {
glog.Warningf("couldn't shutdown the container, will continue with kill anyways: %v", err)
klog.Warningf("couldn't shutdown the container, will continue with kill anyways: %v", err)
}

cr := command.NewExecRunner() // using exec runner for interacting with dameon.
@ -272,7 +293,7 @@ func (d *Driver) Kill() error {
// Remove will delete the Kic Node Container
func (d *Driver) Remove() error {
if _, err := oci.ContainerID(d.OCIBinary, d.MachineName); err != nil {
glog.Infof("could not find the container %s to remove it. will try anyways", d.MachineName)
klog.Infof("could not find the container %s to remove it. will try anyways", d.MachineName)
}

if err := oci.DeleteContainer(d.NodeConfig.OCIBinary, d.MachineName); err != nil {
@ -289,6 +310,10 @@ func (d *Driver) Remove() error {
if id, err := oci.ContainerID(d.OCIBinary, d.MachineName); err == nil && id != "" {
return fmt.Errorf("expected no container ID be found for %q after delete. but got %q", d.MachineName, id)
}

if err := oci.RemoveNetwork(d.NodeConfig.ClusterName); err != nil {
klog.Warningf("failed to remove network (which might be okay) %s: %v", d.NodeConfig.ClusterName, err)
}
return nil
}
@ -296,7 +321,7 @@ func (d *Driver) Remove() error {
func (d *Driver) Restart() error {
s, err := d.GetState()
if err != nil {
glog.Warningf("get state during restart: %v", err)
klog.Warningf("get state during restart: %v", err)
}
if s == state.Stopped { // don't stop if already stopped
return d.Start()
@ -328,7 +353,7 @@ func (d *Driver) Start() error {
if s != state.Running {
return fmt.Errorf("expected container state be running but got %q", s)
}
glog.Infof("container %q state is running.", d.MachineName)
klog.Infof("container %q state is running.", d.MachineName)
return nil
}
@ -351,35 +376,35 @@ func (d *Driver) Stop() error {
// docker does not send right SIG for systemd to know to stop the systemd.
// to avoid bind address be taken on an upgrade. more info https://github.com/kubernetes/minikube/issues/7171
if err := sysinit.New(d.exec).Stop("kubelet"); err != nil {
glog.Warningf("couldn't stop kubelet. will continue with stop anyways: %v", err)
klog.Warningf("couldn't stop kubelet. will continue with stop anyways: %v", err)
if err := sysinit.New(d.exec).ForceStop("kubelet"); err != nil {
glog.Warningf("couldn't force stop kubelet. will continue with stop anyways: %v", err)
klog.Warningf("couldn't force stop kubelet. will continue with stop anyways: %v", err)
}
}

runtime, err := cruntime.New(cruntime.Config{Type: d.NodeConfig.ContainerRuntime, Runner: d.exec})
if err != nil { // won't return error because:
// even though we can't stop the cotainers inside, we still wanna stop the minikube container itself
glog.Errorf("unable to get container runtime: %v", err)
klog.Errorf("unable to get container runtime: %v", err)
} else {
containers, err := runtime.ListContainers(cruntime.ListOptions{Namespaces: constants.DefaultNamespaces})
if err != nil {
glog.Infof("unable list containers : %v", err)
klog.Infof("unable list containers : %v", err)
}
if len(containers) > 0 {
if err := runtime.StopContainers(containers); err != nil {
glog.Infof("unable to stop containers : %v", err)
klog.Infof("unable to stop containers : %v", err)
}
if err := runtime.KillContainers(containers); err != nil {
glog.Errorf("unable to kill containers : %v", err)
klog.Errorf("unable to kill containers : %v", err)
}
}
glog.Infof("successfully stopped kubernetes!")
klog.Infof("successfully stopped kubernetes!")

}

if err := killAPIServerProc(d.exec); err != nil {
glog.Warningf("couldn't stop kube-apiserver proc: %v", err)
klog.Warningf("couldn't stop kube-apiserver proc: %v", err)
}

cmd := exec.Command(d.NodeConfig.OCIBinary, "stop", d.MachineName)
@ -402,8 +427,8 @@ func killAPIServerProc(runner command.Runner) error {
if err == nil { // this means we might have a running kube-apiserver
pid, err := strconv.Atoi(rr.Stdout.String())
if err == nil { // this means we have a valid pid
glog.Warningf("Found a kube-apiserver running with pid %d, will try to kill the proc", pid)
if _, err = runner.RunCmd(exec.Command("pkill", "-9", string(pid))); err != nil {
klog.Warningf("Found a kube-apiserver running with pid %d, will try to kill the proc", pid)
if _, err = runner.RunCmd(exec.Command("pkill", "-9", fmt.Sprint(pid))); err != nil {
return errors.Wrap(err, "kill")
}
}
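The pkill change in this hunk fixes a classic Go pitfall: converting an integer to string with a plain type conversion yields the UTF-8 encoding of that code point, not its decimal digits, so the old code asked pkill to match a garbage pattern. A quick demonstration:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	pid := 65
	fmt.Println(string(rune(pid))) // "A"  - what string(pid) produced (modern Go requires the rune conversion)
	fmt.Println(fmt.Sprint(pid))   // "65" - what pkill actually needs
	fmt.Println(strconv.Itoa(pid)) // "65" - equivalent alternative
}
```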
@ -26,7 +26,7 @@ import (
"strings"
"time"

"github.com/golang/glog"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/style"
)

@ -107,7 +107,7 @@ func runCmd(cmd *exec.Cmd, warnSlow ...bool) (*RunResult, error) {
}

rr := &RunResult{Args: cmd.Args}
glog.Infof("Run: %v", rr.Command())
klog.Infof("Run: %v", rr.Command())

var outb, errb io.Writer
if cmd.Stdout == nil {
@ -144,12 +144,14 @@ func runCmd(cmd *exec.Cmd, warnSlow ...bool) (*RunResult, error) {
}
}

if exitError, ok := err.(*exec.ExitError); ok {
rr.ExitCode = exitError.ExitCode()
if ex, ok := err.(*exec.ExitError); ok {
klog.Warningf("%s returned with exit code %d", rr.Command(), ex.ExitCode())
rr.ExitCode = ex.ExitCode()
}

// Decrease log spam
if elapsed > (1 * time.Second) {
glog.Infof("Completed: %s: (%s)", rr.Command(), elapsed)
klog.Infof("Completed: %s: (%s)", rr.Command(), elapsed)
}
if err == nil {
return rr, nil
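A self-contained sketch of the *exec.ExitError handling added above; a non-zero exit status comes back as an error whose concrete type carries the exit code (ExitCode() is available since Go 1.12):

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	err := exec.Command("sh", "-c", "exit 3").Run() // assumes a POSIX shell
	if ex, ok := err.(*exec.ExitError); ok {
		fmt.Println("exit code:", ex.ExitCode()) // exit code: 3
	}
}
```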
@ -21,7 +21,7 @@ import (
"os/exec"
"strings"

"github.com/golang/glog"
"k8s.io/klog/v2"
)

// FailFastError type is an error that could not be solved by trying again
@ -39,40 +39,59 @@ var ErrWindowsContainers = &FailFastError{errors.New("docker container type is w
// ErrCPUCountLimit is thrown when docker daemon doesn't have enough CPUs for the requested container
var ErrCPUCountLimit = &FailFastError{errors.New("not enough CPUs is available for container")}

// ErrIPinUse is thrown when the container been given an IP used by another container
var ErrIPinUse = &FailFastError{errors.New("can't create with that IP, address already in use")}

// ErrExitedUnexpectedly is thrown when container is created/started without error but later it exists and it's status is not running anymore.
var ErrExitedUnexpectedly = errors.New("container exited unexpectedly")

// ErrDaemonInfo is thrown when docker/podman info is failing or not responding
var ErrDaemonInfo = errors.New("daemon info not responding")

// ErrInsufficientDockerStorage is thrown when there is not more storage for docker
var ErrInsufficientDockerStorage = &FailFastError{errors.New("insufficient docker storage, no space left on device")}

// ErrNetworkSubnetTaken is thrown when a subnet is taken by another network
var ErrNetworkSubnetTaken = errors.New("subnet is taken")

// ErrNetworkNotFound is when given network was not found
var ErrNetworkNotFound = errors.New("kic network not found")

// ErrNetworkGatewayTaken is when given network gatway is taken
var ErrNetworkGatewayTaken = errors.New("network gateway is taken")

// ErrNetworkInUse is when trying to delete a network which is attached to another container
var ErrNetworkInUse = errors.New("unable to delete a network that is attached to a running container")

// LogContainerDebug will print relevant docker/podman infos after a container fails
func LogContainerDebug(ociBin string, name string) string {
rr, err := containerInspect(ociBin, name)
if err != nil {
glog.Warningf("Filed to get postmortem inspect. %s :%v", rr.Command(), err)
klog.Warningf("Failed to get postmortem inspect. %s :%v", rr.Command(), err)
} else {
glog.Infof("Postmortem inspect (%q): %s", rr.Command(), rr.Output())
klog.Infof("Postmortem inspect (%q): %s", rr.Command(), rr.Output())
}

rr, err = containerLogs(ociBin, name)
if err != nil {
glog.Warningf("Filed to get postmortem logs. %s :%v", rr.Command(), err)
klog.Warningf("Failed to get postmortem logs. %s :%v", rr.Command(), err)
} else {
glog.Infof("Postmortem logs (%q): %s", rr.Command(), rr.Output())
klog.Infof("Postmortem logs (%q): %s", rr.Command(), rr.Output())
}
if ociBin == Docker {
di, err := dockerSystemInfo()
if err != nil {
glog.Warningf("Failed to get postmortem docker info: %v", err)
klog.Warningf("Failed to get postmortem docker info: %v", err)
} else {
glog.Infof("postmortem docker info: %+v", di)
klog.Infof("postmortem docker info: %+v", di)
}
logDockerNetworkInspect(name)
} else {
pi, err := podmanSystemInfo()
if err != nil {
glog.Warningf("couldn't get postmortem info, failed to to run podman info: %v", err)
klog.Warningf("couldn't get postmortem info, failed to to run podman info: %v", err)
} else {
glog.Infof("postmortem podman info: %+v", pi)
klog.Infof("postmortem podman info: %+v", pi)
}
}
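A sketch of how these sentinels are meant to be consumed; the FailFastError shape is assumed from the variable declarations above. Callers wrap errors with %w and then use errors.As/errors.Is to decide whether a retry makes sense:

```go
package main

import (
	"errors"
	"fmt"
)

// FailFastError marks an error that retrying will not fix
// (shape assumed from the declarations in the hunk above).
type FailFastError struct{ error }

var ErrIPinUse = &FailFastError{errors.New("can't create with that IP, address already in use")}

func createContainer() error {
	return fmt.Errorf("run container: %w", ErrIPinUse) // wrapped somewhere down the stack
}

func main() {
	err := createContainer()
	var ff *FailFastError
	if errors.As(err, &ff) {
		fmt.Println("not retryable:", err)
		return
	}
	fmt.Println("retrying...")
}
```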
@ -23,19 +23,23 @@ import (
"time"

"github.com/pkg/errors"
"k8s.io/klog/v2"
)

// SysInfo Info represents common system Information between docker and podman that minikube cares
type SysInfo struct {
CPUs int // CPUs is Number of CPUs
TotalMemory int64 // TotalMemory Total available ram
OSType string // container's OsType (windows or linux)
Swarm bool // Weather or not the docker swarm is active
StorageDriver string // the storage driver for the daemon (for example overlay2)
CPUs          int      // CPUs is Number of CPUs
TotalMemory   int64    // TotalMemory Total available ram
OSType        string   // container's OsType (windows or linux)
Swarm         bool     // Weather or not the docker swarm is active
StorageDriver string   // the storage driver for the daemon (for example overlay2)
Errors        []string // any server issues
}

var cachedSysInfo *SysInfo
var cachedSysInfoErr *error
var (
cachedSysInfo *SysInfo
cachedSysInfoErr *error
)

// CachedDaemonInfo will run and return a docker/podman info only once per minikube run time. to avoid performance
func CachedDaemonInfo(ociBin string) (SysInfo, error) {

@ -58,7 +62,7 @@ func DaemonInfo(ociBin string) (SysInfo, error) {
return *cachedSysInfo, err
}
d, err := dockerSystemInfo()
cachedSysInfo = &SysInfo{CPUs: d.NCPU, TotalMemory: d.MemTotal, OSType: d.OSType, Swarm: d.Swarm.LocalNodeState == "active", StorageDriver: d.Driver}
cachedSysInfo = &SysInfo{CPUs: d.NCPU, TotalMemory: d.MemTotal, OSType: d.OSType, Swarm: d.Swarm.LocalNodeState == "active", StorageDriver: d.Driver, Errors: d.ServerErrors}
return *cachedSysInfo, err
}
@ -163,6 +167,7 @@ type dockerSysInfo struct {
SecurityOptions []string `json:"SecurityOptions"`
ProductLicense string `json:"ProductLicense"`
Warnings interface{} `json:"Warnings"`
ServerErrors []string
ClientInfo struct {
Debug bool `json:"Debug"`
Plugins []interface{} `json:"Plugins"`

@ -245,6 +250,7 @@ func dockerSystemInfo() (dockerSysInfo, error) {
return ds, errors.Wrapf(err, "unmarshal docker system info")
}

klog.Infof("docker info: %+v", ds)
return ds, nil
}

@ -264,5 +270,6 @@ func podmanSystemInfo() (podmanSysInfo, error) {
if err := json.Unmarshal([]byte(strings.TrimSpace(rawJSON)), &ps); err != nil {
return ps, errors.Wrapf(err, "unmarshal podman system info")
}
klog.Infof("podman info: %+v", ps)
return ps, nil
}
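Roughly what the docker side of this plumbing does: run `docker system info` with a JSON format string and decode only the fields minikube cares about, including the new ServerErrors. A hedged sketch (the exact flags and field set here are assumptions of the sketch, not quoted from the diff):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
	"strings"
)

type sysInfo struct {
	NCPU         int
	MemTotal     int64
	OSType       string
	Driver       string
	ServerErrors []string
}

func main() {
	out, err := exec.Command("docker", "system", "info", "--format", "{{json .}}").Output()
	if err != nil {
		fmt.Println("docker info failed:", err)
		return
	}
	var si sysInfo
	if err := json.Unmarshal([]byte(strings.TrimSpace(string(out))), &si); err != nil {
		fmt.Println("unmarshal:", err)
		return
	}
	fmt.Printf("cpus=%d mem=%d driver=%s errors=%v\n", si.NCPU, si.MemTotal, si.Driver, si.ServerErrors)
}
```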
@ -25,23 +25,33 @@ import (
"strings"

"github.com/blang/semver"
"github.com/golang/glog"
"github.com/pkg/errors"

"k8s.io/klog/v2"
)

// RoutableHostIPFromInside returns the ip/dns of the host that container lives on
// is routable from inside the container
func RoutableHostIPFromInside(ociBin string, containerName string) (net.IP, error) {
func RoutableHostIPFromInside(ociBin string, clusterName string, containerName string) (net.IP, error) {
if ociBin == Docker {
if runtime.GOOS == "linux" {
return dockerGatewayIP(containerName)
_, gateway, err := dockerNetworkInspect(clusterName)
if err != nil {
if errors.Is(err, ErrNetworkNotFound) {
klog.Infof("The container %s is not attached to a network, this could be because the cluster was created by minikube <v1.14, will try to get the IP using container gatway", containerName)

return containerGatewayIP(Docker, containerName)
}
return gateway, errors.Wrap(err, "network inspect")
}
return gateway, nil
}
// for windows and mac, the gateway ip is not routable so we use dns trick.
return digDNS(ociBin, containerName, "host.docker.internal")
}

// podman
if runtime.GOOS == "linux" {
return containerGatewayIP(ociBin, containerName)
return containerGatewayIP(Podman, containerName)
}

return nil, fmt.Errorf("RoutableHostIPFromInside is currently only implemented for linux")
@ -55,60 +65,12 @@ func digDNS(ociBin, containerName, dns string) (net.IP, error) {
return ip, errors.Wrapf(err, "resolve dns to ip")
}

glog.Infof("got host ip for mount in container by digging dns: %s", ip.String())
return ip, nil
}

// profileInContainers checks whether the profile is within the containers list
func profileInContainers(profile string, containers []string) bool {
for _, container := range containers {
if container == profile {
return true
}
}
return false
}

// dockerGatewayIP gets the default gateway ip for the docker bridge on the user's host machine
// gets the ip from user's host docker
func dockerGatewayIP(profile string) (net.IP, error) {
var bridgeID string
rr, err := runCmd(exec.Command(Docker, "network", "ls", "--filter", "name=bridge", "--format", "{{.ID}}"))
if err != nil {
return nil, errors.Wrapf(err, "get network bridge")
}
networksOutput := strings.TrimSpace(rr.Stdout.String())
networksSlice := strings.Fields(networksOutput)
// Look for the minikube container within each docker network
for _, net := range networksSlice {
// get all containers in the network
rs, err := runCmd(exec.Command(Docker, "network", "inspect", net, "-f", "{{range $k, $v := .Containers}}{{$v.Name}} {{end}}"))
if err != nil {
return nil, errors.Wrapf(err, "get containers in network")
}
containersSlice := strings.Fields(rs.Stdout.String())
if profileInContainers(profile, containersSlice) {
bridgeID = net
break
}
}

if bridgeID == "" {
return nil, errors.Errorf("unable to determine bridge network id from %q", networksOutput)
}
rr, err = runCmd(exec.Command(Docker, "network", "inspect",
"--format", "{{(index .IPAM.Config 0).Gateway}}", bridgeID))
if err != nil {
return nil, errors.Wrapf(err, "inspect IP bridge network %q.", bridgeID)
}

ip := net.ParseIP(strings.TrimSpace(rr.Stdout.String()))
glog.Infof("got host ip for mount in container by inspect docker network: %s", ip.String())
klog.Infof("got host ip for mount in container by digging dns: %s", ip.String())
return ip, nil
}

// containerGatewayIP gets the default gateway ip for the container
func containerGatewayIP(ociBin, containerName string) (net.IP, error) {
func containerGatewayIP(ociBin string, containerName string) (net.IP, error) {
rr, err := runCmd(exec.Command(ociBin, "container", "inspect", "--format", "{{.NetworkSettings.Gateway}}", containerName))
if err != nil {
return nil, errors.Wrapf(err, "inspect gateway")
@ -0,0 +1,204 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package oci

import (
"bufio"
"bytes"
"fmt"
"net"
"os/exec"
"strings"

"github.com/pkg/errors"

"k8s.io/klog/v2"
)

// firstSubnetAddr subnet to be used on first kic cluster
// it is one octet more than the one used by KVM to avoid possible conflict
const firstSubnetAddr = "192.168.49.0"

// big enough for a cluster of 254 nodes
const defaultSubnetMask = 24

// CreateNetwork creates a network returns gateway and error, minikube creates one network per cluster
func CreateNetwork(ociBin string, name string) (net.IP, error) {
if ociBin != Docker {
return nil, fmt.Errorf("%s network not implemented yet", ociBin)
}
return createDockerNetwork(name)
}

func createDockerNetwork(clusterName string) (net.IP, error) {
// check if the network already exists
subnet, gateway, err := dockerNetworkInspect(clusterName)
if err == nil {
klog.Infof("Found existing network with subnet %s and gateway %s.", subnet, gateway)
return gateway, nil
}

attempts := 0
subnetAddr := firstSubnetAddr
// Rather than iterate through all of the valid subnets, give up at 20 to avoid a lengthy user delay for something that is unlikely to work.
// will be like 192.168.49.0/24 ,...,192.168.239.0/24
for attempts < 20 {
gateway, err = tryCreateDockerNetwork(subnetAddr, defaultSubnetMask, clusterName)
if err == nil {
return gateway, nil
}

// don't retry if error is not adddress is taken
if !(errors.Is(err, ErrNetworkSubnetTaken) || errors.Is(err, ErrNetworkGatewayTaken)) {
klog.Errorf("error while trying to create network %v", err)
return nil, errors.Wrap(err, "un-retryable")
}
attempts++
// Find an open subnet by incrementing the 3rd octet by 10 for each try
// 13 times adding 10 firstSubnetAddr "192.168.49.0/24"
// at most it will add up to 169 which is still less than max allowed 255
// this is large enough to try more and not too small to not try enough
// can be tuned in the next iterations
newSubnet := net.ParseIP(subnetAddr).To4()
newSubnet[2] += byte(9 + attempts)
subnetAddr = newSubnet.String()
}
return gateway, fmt.Errorf("failed to create network after 20 attempts")
}
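The retry arithmetic above is easier to see in isolation: each attempt adds 9+attempts to the third octet of the previous candidate, so the probed subnets spread out across 192.168.x.0/24. A standalone sketch of the first few candidates:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	subnetAddr := "192.168.49.0"
	for attempts := 1; attempts <= 5; attempts++ {
		s := net.ParseIP(subnetAddr).To4()
		s[2] += byte(9 + attempts) // mirrors createDockerNetwork above
		subnetAddr = s.String()
		fmt.Printf("attempt %d -> %s/24\n", attempts, subnetAddr)
	}
	// attempt 1 -> 192.168.59.0/24
	// attempt 2 -> 192.168.70.0/24
	// attempt 3 -> 192.168.82.0/24 ...
}
```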
func tryCreateDockerNetwork(subnetAddr string, subnetMask int, name string) (net.IP, error) {
gateway := net.ParseIP(subnetAddr)
gateway.To4()[3]++ // first ip for gateway
klog.Infof("attempt to create network %s/%d with subnet: %s and gateway %s...", subnetAddr, subnetMask, name, gateway)
// options documentation https://docs.docker.com/engine/reference/commandline/network_create/#bridge-driver-options
rr, err := runCmd(exec.Command(Docker, "network", "create", "--driver=bridge", fmt.Sprintf("--subnet=%s", fmt.Sprintf("%s/%d", subnetAddr, subnetMask)), fmt.Sprintf("--gateway=%s", gateway), "-o", "--ip-masq", "-o", "--icc", fmt.Sprintf("--label=%s=%s", CreatedByLabelKey, "true"), name))
if err != nil {
// Pool overlaps with other one on this address space
if strings.Contains(rr.Output(), "Pool overlaps") {
return nil, ErrNetworkSubnetTaken
}
if strings.Contains(rr.Output(), "failed to allocate gateway") && strings.Contains(rr.Output(), "Address already in use") {
return nil, ErrNetworkGatewayTaken
}
return nil, errors.Wrapf(err, "create network %s", fmt.Sprintf("%s %s/%d", name, subnetAddr, subnetMask))
}
return gateway, nil
}
// returns subnet and gate if exists
func dockerNetworkInspect(name string) (*net.IPNet, net.IP, error) {
cmd := exec.Command(Docker, "network", "inspect", name, "--format", "{{(index .IPAM.Config 0).Subnet}},{{(index .IPAM.Config 0).Gateway}}")
rr, err := runCmd(cmd)
if err != nil {
logDockerNetworkInspect(name)
if strings.Contains(rr.Output(), "No such network") {
return nil, nil, ErrNetworkNotFound
}
return nil, nil, err
}
// results looks like 172.17.0.0/16,172.17.0.1
ips := strings.Split(strings.TrimSpace(rr.Stdout.String()), ",")
if len(ips) == 0 {
return nil, nil, fmt.Errorf("empty IP list parsed from: %q", rr.Output())
}

_, subnet, err := net.ParseCIDR(ips[0])
if err != nil {
return nil, nil, errors.Wrapf(err, "parse subnet for %s", name)
}
var gateway net.IP
if len(ips) > 0 {
gateway = net.ParseIP(ips[1])
}
return subnet, gateway, nil
}
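The inspect format string prints "subnet,gateway" on one line (for example "172.17.0.0/16,172.17.0.1"). A standalone sketch of that parse; note the sketch guards the second index explicitly before using it:

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

func parseNetworkInspect(out string) (*net.IPNet, net.IP, error) {
	ips := strings.Split(strings.TrimSpace(out), ",")
	if len(ips) < 2 {
		return nil, nil, fmt.Errorf("expected subnet,gateway but got %q", out)
	}
	_, subnet, err := net.ParseCIDR(ips[0])
	if err != nil {
		return nil, nil, err
	}
	return subnet, net.ParseIP(ips[1]), nil
}

func main() {
	subnet, gw, err := parseNetworkInspect("192.168.49.0/24,192.168.49.1")
	fmt.Println(subnet, gw, err) // 192.168.49.0/24 192.168.49.1 <nil>
}
```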
func logDockerNetworkInspect(name string) {
cmd := exec.Command(Docker, "network", "inspect", name)
klog.Infof("running %v to gather additional debugging logs...", cmd.Args)
rr, err := runCmd(cmd)
if err != nil {
klog.Infof("error running %v: %v", rr.Args, err)
}
klog.Infof("output of %v: %v", rr.Args, rr.Output())
}

// RemoveNetwork removes a network
func RemoveNetwork(name string) error {
if !networkExists(name) {
return nil
}
rr, err := runCmd(exec.Command(Docker, "network", "remove", name))
if err != nil {
if strings.Contains(rr.Output(), "No such network") {
return ErrNetworkNotFound
}
// Error response from daemon: error while removing network: network mynet123 id f9e1c50b89feb0b8f4b687f3501a81b618252c9907bc20666e386d0928322387 has active endpoints
if strings.Contains(rr.Output(), "has active endpoints") {
return ErrNetworkInUse
}
}

return err
}

func networkExists(name string) bool {
_, _, err := dockerNetworkInspect(name)
if err != nil && !errors.Is(err, ErrNetworkNotFound) { // log unexpected error
klog.Warningf("Error inspecting docker network %s: %v", name, err)
}
return err == nil
}

// networkNamesByLabel returns all network names created by a label
func networkNamesByLabel(ociBin string, label string) ([]string, error) {
if ociBin != Docker {
return nil, fmt.Errorf("%s not supported", ociBin)
}

// docker network ls --filter='label=created_by.minikube.sigs.k8s.io=true' --format '{{.Name}}'
rr, err := runCmd(exec.Command(Docker, "network", "ls", fmt.Sprintf("--filter=label=%s", label), "--format", "{{.Name}}"))
if err != nil {
return nil, err
}
var lines []string
scanner := bufio.NewScanner(bytes.NewReader(rr.Stdout.Bytes()))
for scanner.Scan() {
lines = append(lines, strings.TrimSpace(scanner.Text()))
}

return lines, nil
}

// DeleteKICNetworks deletes all networks created by kic
func DeleteKICNetworks() []error {
var errs []error
ns, err := networkNamesByLabel(Docker, CreatedByLabelKey+"=true")
if err != nil {
return []error{errors.Wrap(err, "list all volume")}
}
for _, n := range ns {
err := RemoveNetwork(n)
if err != nil {
errs = append(errs, err)
}
}
if len(errs) > 0 {
return errs
}
return nil
}
@ -25,8 +25,9 @@ import (
"bytes"

"github.com/docker/machine/libmachine/state"
"github.com/golang/glog"
"github.com/pkg/errors"

"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/util/retry"

@ -58,11 +59,11 @@ func DeleteContainersByLabel(ociBin string, label string) []error {
// if it doesn't it means docker daemon is stuck and needs restart
if err != nil {
deleteErrs = append(deleteErrs, errors.Wrapf(err, "delete container %s: %s daemon is stuck. please try again!", c, ociBin))
glog.Errorf("%s daemon seems to be stuck. Please try restarting your %s. :%v", ociBin, ociBin, err)
klog.Errorf("%s daemon seems to be stuck. Please try restarting your %s. :%v", ociBin, ociBin, err)
continue
}
if err := ShutDown(ociBin, c); err != nil {
glog.Infof("couldn't shut down %s (might be okay): %v ", c, err)
klog.Infof("couldn't shut down %s (might be okay): %v ", c, err)
}

if _, err := runCmd(exec.Command(ociBin, "rm", "-f", "-v", c)); err != nil {
@ -79,11 +80,11 @@ func DeleteContainer(ociBin string, name string) error {
if err == context.DeadlineExceeded {
out.WarningT("{{.ocibin}} is taking an unsually long time to respond, consider restarting {{.ocibin}}", out.V{"ociBin": ociBin})
} else if err != nil {
glog.Warningf("error getting container status, will try to delete anyways: %v", err)
klog.Warningf("error getting container status, will try to delete anyways: %v", err)
}
// try to delete anyways
if err := ShutDown(ociBin, name); err != nil {
glog.Infof("couldn't shut down %s (might be okay): %v ", name, err)
klog.Infof("couldn't shut down %s (might be okay): %v ", name, err)
}

if _, err := runCmd(exec.Command(ociBin, "rm", "-f", "-v", name)); err != nil {
@ -98,11 +99,11 @@ func PrepareContainerNode(p CreateParams) error {
if err := createVolume(p.OCIBinary, p.Name, p.Name); err != nil {
return errors.Wrapf(err, "creating volume for %s container", p.Name)
}
glog.Infof("Successfully created a %s volume %s", p.OCIBinary, p.Name)
klog.Infof("Successfully created a %s volume %s", p.OCIBinary, p.Name)
if err := prepareVolume(p.OCIBinary, p.Image, p.Name); err != nil {
return errors.Wrapf(err, "preparing volume for %s container", p.Name)
}
glog.Infof("Successfully prepared a %s volume %s", p.OCIBinary, p.Name)
klog.Infof("Successfully prepared a %s volume %s", p.OCIBinary, p.Name)
return nil
}

@ -115,7 +116,7 @@ func CreateContainerNode(p CreateParams) error {
return ErrWindowsContainers
}
if err != nil {
glog.Warningf("error getting dameon info: %v", err)
klog.Warningf("error getting dameon info: %v", err)
return errors.Wrap(err, "daemon info")
}
}
@ -149,7 +150,7 @@ func CreateContainerNode(p CreateParams) error {
if runtime.GOOS == "linux" {
if _, err := os.Stat("/sys/fs/cgroup/memory/memsw.limit_in_bytes"); os.IsNotExist(err) {
// requires CONFIG_MEMCG_SWAP_ENABLED or cgroup_enable=memory in grub
glog.Warning("Your kernel does not support swap limit capabilities or the cgroup is not mounted.")
klog.Warning("Your kernel does not support swap limit capabilities or the cgroup is not mounted.")
memcgSwap = false
}
}

@ -169,6 +170,11 @@ func CreateContainerNode(p CreateParams) error {
virtualization = "podman" // VIRTUALIZATION_PODMAN
}
if p.OCIBinary == Docker {
// to provide a static IP for docker
if p.Network != "" && p.IP != "" {
runArgs = append(runArgs, "--network", p.Network)
runArgs = append(runArgs, "--ip", p.IP)
}
runArgs = append(runArgs, "--volume", fmt.Sprintf("%s:/var", p.Name))
// ignore apparmore github actions docker: https://github.com/kubernetes/minikube/issues/7624
runArgs = append(runArgs, "--security-opt", "apparmor=unconfined")
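Put together, the flags added above mean the node container is launched with an explicit network and address, roughly `docker run --network <cluster> --ip <ip> ... <image>`. A sketch of the argument assembly (the image tag and values here are illustrative, not taken from the diff):

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	runArgs := []string{"run", "-d", "--name", "minikube"}
	network, ip := "minikube", "192.168.49.2" // illustrative values
	if network != "" && ip != "" {
		runArgs = append(runArgs, "--network", network, "--ip", ip)
	}
	runArgs = append(runArgs, "--security-opt", "apparmor=unconfined", "gcr.io/k8s-minikube/kicbase:v0.0.13")
	cmd := exec.Command("docker", runArgs...)
	fmt.Println(cmd.Args) // inspect the final command line before running it
}
```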
@ -191,7 +197,7 @@ func CreateContainerNode(p CreateParams) error {
}
if !cpuCfsPeriod || !cpuCfsQuota {
// requires CONFIG_CFS_BANDWIDTH
glog.Warning("Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.")
klog.Warning("Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.")
}
}

@ -236,7 +242,7 @@ func CreateContainerNode(p CreateParams) error {
if !iptablesFileExists(p.OCIBinary, p.Name) {
return fmt.Errorf("iptables file doesn't exist, see #8179")
}
glog.Infof("the created container %q has a running status.", p.Name)
klog.Infof("the created container %q has a running status.", p.Name)
return nil
}

@ -285,6 +291,10 @@ func createContainer(ociBin string, image string, opts ...createOpt) error {
if strings.Contains(rr.Output(), "Range of CPUs is from") && strings.Contains(rr.Output(), "CPUs available") { // CPUs available
return ErrCPUCountLimit
}
// example: docker: Error response from daemon: Address already in use.
if strings.Contains(rr.Output(), "Address already in use") {
return ErrIPinUse
}
return err
}
@ -513,7 +523,7 @@ func ListContainersByLabel(ociBin string, label string, warnSlow ...bool) ([]str
func PointToHostDockerDaemon() error {
p := os.Getenv(constants.MinikubeActiveDockerdEnv)
if p != "" {
glog.Infof("shell is pointing to dockerd inside minikube. will unset to use host")
klog.Infof("shell is pointing to dockerd inside minikube. will unset to use host")
}

for i := range constants.DockerDaemonEnvs {

@ -531,7 +541,7 @@ func PointToHostDockerDaemon() error {
func PointToHostPodman() error {
p := os.Getenv(constants.MinikubeActivePodmanEnv)
if p != "" {
glog.Infof("shell is pointing to podman inside minikube. will unset to use host")
klog.Infof("shell is pointing to podman inside minikube. will unset to use host")
}

for i := range constants.PodmanRemoteEnvs {

@ -582,7 +592,7 @@ func ContainerStatus(ociBin string, name string, warnSlow ...bool) (state.State,
// to avoid containers getting stuck before delete https://github.com/kubernetes/minikube/issues/7657
func ShutDown(ociBin string, name string) error {
if _, err := runCmd(exec.Command(ociBin, "exec", "--privileged", "-t", name, "/bin/bash", "-c", "sudo init 0")); err != nil {
glog.Infof("error shutdown %s: %v", name, err)
klog.Infof("error shutdown %s: %v", name, err)
}
// helps with allowing docker realize the container is exited and report its status correctly.
time.Sleep(time.Second * 1)
@ -590,19 +600,19 @@ func ShutDown(ociBin string, name string) error {
stopped := func() error {
st, err := ContainerStatus(ociBin, name)
if st == state.Stopped {
glog.Infof("container %s status is %s", name, st)
klog.Infof("container %s status is %s", name, st)
return nil
}
if err != nil {
glog.Infof("temporary error verifying shutdown: %v", err)
klog.Infof("temporary error verifying shutdown: %v", err)
}
glog.Infof("temporary error: container %s status is %s but expect it to be exited", name, st)
return errors.Wrap(err, "couldn't verify cointainer is exited. %v")
klog.Infof("temporary error: container %s status is %s but expect it to be exited", name, st)
return errors.Wrap(err, "couldn't verify container is exited. %v")
}
if err := retry.Expo(stopped, time.Millisecond*500, time.Second*20); err != nil {
return errors.Wrap(err, "verify shutdown")
}
glog.Infof("Successfully shutdown container %s", name)
klog.Infof("Successfully shutdown container %s", name)
return nil
}
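retry.Expo (from k8s.io/minikube/pkg/util/retry) polls the closure with exponential backoff until it succeeds or the time budget runs out. A self-contained stand-in showing the shape of that loop, under the assumption that doubling delays approximate the real helper:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// expo retries f with doubling delays until maxTime is exhausted.
func expo(f func() error, initial, maxTime time.Duration) error {
	deadline := time.Now().Add(maxTime)
	delay := initial
	for {
		if err := f(); err == nil {
			return nil
		} else if time.Now().After(deadline) {
			return err
		}
		time.Sleep(delay)
		delay *= 2
	}
}

func main() {
	tries := 0
	stopped := func() error {
		tries++
		if tries < 3 {
			return errors.New("container not exited yet")
		}
		return nil
	}
	fmt.Println(expo(stopped, 500*time.Millisecond, 20*time.Second), "after", tries, "tries")
}
```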
@ -613,7 +623,7 @@ func iptablesFileExists(ociBin string, nameOrID string) bool {
file := "/var/lib/dpkg/alternatives/iptables"
_, err := runCmd(exec.Command(ociBin, "exec", nameOrID, "stat", file), false)
if err != nil {
glog.Warningf("error checking if %s exists: %v", file, err)
klog.Warningf("error checking if %s exists: %v", file, err)
return false
}
return true
@ -43,6 +43,7 @@ const (

// CreateParams are parameters needed to create a container
type CreateParams struct {
ClusterName string // cluster(profile name) that this container belongs to
Name string // used for container name and hostname
Image string // container image to use to create the node.
ClusterLabel string // label the clusters we create using minikube so we can clean up

@ -56,6 +57,8 @@ type CreateParams struct {
Envs map[string]string // environment variables to pass to the container
ExtraArgs []string // a list of any extra option to pass to oci binary during creation time, for example --expose 8080...
OCIBinary string // docker or podman
Network string // network name that the container will attach to
IP string // static IP to assign for th container in the cluster network
}

// createOpt is an option for Create
@ -24,15 +24,16 @@ import (
"runtime"
"strings"

"github.com/golang/glog"
"github.com/pkg/errors"

"k8s.io/klog/v2"
)

// DeleteAllVolumesByLabel deletes all volumes that have a specific label
// if there is no volume to delete it will return nil
func DeleteAllVolumesByLabel(ociBin string, label string, warnSlow ...bool) []error {
var deleteErrs []error
glog.Infof("trying to delete all %s volumes with label %s", ociBin, label)
klog.Infof("trying to delete all %s volumes with label %s", ociBin, label)

vs, err := allVolumesByLabel(ociBin, label)

@ -54,7 +55,7 @@ func DeleteAllVolumesByLabel(ociBin string, label string, warnSlow ...bool) []er
// example: docker volume prune -f --filter label=name.minikube.sigs.k8s.io=minikube
func PruneAllVolumesByLabel(ociBin string, label string, warnSlow ...bool) []error {
var deleteErrs []error
glog.Infof("trying to prune all %s volumes with label %s", ociBin, label)
klog.Infof("trying to prune all %s volumes with label %s", ociBin, label)
cmd := exec.Command(ociBin, "volume", "prune", "-f", "--filter", "label="+label)
if _, err := runCmd(cmd, warnSlow...); err != nil {
deleteErrs = append(deleteErrs, errors.Wrapf(err, "prune volume by label %s", label))
@ -24,9 +24,9 @@ import (

const (
// Version is the current version of kic
Version = "v0.0.12-snapshot3"
Version = "v0.0.13"
// SHA of the kic base image
baseImageSHA = "1d687ba53e19dbe5fafe4cc18aa07f269ecc4b7b622f2251b5bf569ddb474e9b"
baseImageSHA = "4d43acbd0050148d4bc399931f1b15253b5e73815b63a67b8ab4a5c9e523403f"
)

var (

@ -48,6 +48,7 @@ var (

// Config is configuration for the kic driver used by registry
type Config struct {
ClusterName string // The cluster the container belongs to
MachineName string // maps to the container name being created
CPU int // Number of CPU cores assigned to the container
Memory int // max memory in MB

@ -60,4 +61,5 @@ type Config struct {
Envs map[string]string // key,value of environment variables passed to the node
KubernetesVersion string // Kubernetes version to install
ContainerRuntime string // container runtime kic is running
ExtraArgs []string // a list of any extra option to pass to oci binary during creation time, for example --expose 8080...
}
@ -31,8 +31,8 @@ import (

const domainTmpl = `
<domain type='kvm'>
<name>{{.MachineName}}</name>
<memory unit='MB'>{{.Memory}}</memory>
<name>{{.MachineName}}</name>
<memory unit='MiB'>{{.Memory}}</memory>
<vcpu>{{.CPU}}</vcpu>
<features>
<acpi/>
@ -22,9 +22,9 @@ import (

"github.com/docker/machine/libmachine/drivers"
"github.com/docker/machine/libmachine/state"
"github.com/golang/glog"
"github.com/pkg/errors"
knet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/klog/v2"
pkgdrivers "k8s.io/minikube/pkg/drivers"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
"k8s.io/minikube/pkg/minikube/command"

@ -65,7 +65,7 @@ func NewDriver(c Config) *Driver {
runtime, err := cruntime.New(cruntime.Config{Type: c.ContainerRuntime, Runner: runner})
// Libraries shouldn't panic, but there is no way for drivers to return error :(
if err != nil {
glog.Fatalf("unable to create container runtime: %v", err)
klog.Fatalf("unable to create container runtime: %v", err)
}
return &Driver{
BaseDriver: &drivers.BaseDriver{

@ -127,7 +127,7 @@ func (d *Driver) GetURL() (string, error) {
func (d *Driver) GetState() (state.State, error) {
hostname, port, err := kubeconfig.Endpoint(d.BaseDriver.MachineName)
if err != nil {
glog.Warningf("unable to get port: %v", err)
klog.Warningf("unable to get port: %v", err)
port = constants.APIServerPort
}

@ -142,13 +142,13 @@ func (d *Driver) GetState() (state.State, error) {
return state.Running, nil
}

return kverify.KubeletStatus(d.exec), nil
return kverify.ServiceStatus(d.exec, "kubelet"), nil
}

// Kill stops a host forcefully, including any containers that we are managing.
func (d *Driver) Kill() error {
if err := sysinit.New(d.exec).ForceStop("kubelet"); err != nil {
glog.Warningf("couldn't force stop kubelet. will continue with kill anyways: %v", err)
klog.Warningf("couldn't force stop kubelet. will continue with kill anyways: %v", err)
}

// First try to gracefully stop containers
@ -182,10 +182,10 @@ func (d *Driver) Remove() error {
if err := d.Kill(); err != nil {
return errors.Wrap(err, "kill")
}
glog.Infof("Removing: %s", cleanupPaths)
klog.Infof("Removing: %s", cleanupPaths)
args := append([]string{"rm", "-rf"}, cleanupPaths...)
if _, err := d.exec.RunCmd(exec.Command("sudo", args...)); err != nil {
glog.Errorf("cleanup incomplete: %v", err)
klog.Errorf("cleanup incomplete: %v", err)
}
return nil
}

@ -212,9 +212,9 @@ func (d *Driver) Start() error {
// Stop a host gracefully, including any containers that we are managing.
func (d *Driver) Stop() error {
if err := sysinit.New(d.exec).Stop("kubelet"); err != nil {
glog.Warningf("couldn't stop kubelet. will continue with stop anyways: %v", err)
klog.Warningf("couldn't stop kubelet. will continue with stop anyways: %v", err)
if err := sysinit.New(d.exec).ForceStop("kubelet"); err != nil {
glog.Warningf("couldn't force stop kubelet. will continue with stop anyways: %v", err)
klog.Warningf("couldn't force stop kubelet. will continue with stop anyways: %v", err)
}
}
containers, err := d.runtime.ListContainers(cruntime.ListOptions{})

@ -226,7 +226,7 @@ func (d *Driver) Stop() error {
return errors.Wrap(err, "stop containers")
}
}
glog.Infof("none driver is stopped!")
klog.Infof("none driver is stopped!")
return nil
}

@ -237,7 +237,7 @@ func (d *Driver) RunSSHCommandFromDriver() error {

// restartKubelet restarts the kubelet
func restartKubelet(cr command.Runner) error {
glog.Infof("restarting kubelet.service ...")
klog.Infof("restarting kubelet.service ...")
c := exec.Command("sudo", "systemctl", "restart", "kubelet.service")
if _, err := cr.RunCmd(c); err != nil {
return err
@ -27,10 +27,10 @@ import (
"strings"
"time"

"github.com/golang/glog"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/out"
)

@ -39,7 +39,7 @@ func Docs(root *cobra.Command, path string) error {
cmds := root.Commands()
for _, c := range cmds {
if c.Hidden {
glog.Infof("Skipping generating doc for %s as it's a hidden command", c.Name())
klog.Infof("Skipping generating doc for %s as it's a hidden command", c.Name())
continue
}
contents, err := DocForCommand(c)

@ -98,7 +98,7 @@ func removeHelpText(buffer *bytes.Buffer) string {
}
// scanner strips the ending newline
if _, err := final.WriteString(line + "\n"); err != nil {
glog.Warningf("error removing help text: %v", err)
klog.Warningf("error removing help text: %v", err)
break
}
}

@ -132,7 +132,7 @@ func generateTitle(command *cobra.Command, w io.Writer) error {
func saveDocForCommand(command *cobra.Command, contents []byte, path string) error {
fp := filepath.Join(path, fmt.Sprintf("%s.md", command.Name()))
if err := os.Remove(fp); err != nil {
glog.Warningf("error removing %s", fp)
klog.Warningf("error removing %s", fp)
}
return ioutil.WriteFile(fp, contents, 0o644)
}
@ -22,7 +22,6 @@ import (
"path"
"time"

"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
core "k8s.io/api/core/v1"
apierr "k8s.io/apimachinery/pkg/api/errors"

@ -35,6 +34,7 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/klog/v2"
kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/minikube/pkg/minikube/proxy"
"k8s.io/minikube/pkg/minikube/vmpath"

@ -56,7 +56,7 @@ func ClientConfig(context string) (*rest.Config, error) {
return nil, fmt.Errorf("client config: %v", err)
}
c = proxy.UpdateTransport(c)
glog.V(1).Infof("client config for %s: %+v", context, c)
klog.V(1).Infof("client config for %s: %+v", context, c)
return c, nil
}

@ -72,18 +72,18 @@ func Client(context string) (*kubernetes.Clientset, error) {
// WaitForPods waits for all matching pods to become Running or finish successfully and at least one matching pod exists.
func WaitForPods(c kubernetes.Interface, ns string, selector string, timeOut ...time.Duration) error {
start := time.Now()
glog.Infof("Waiting for pod with label %q in ns %q ...", selector, ns)
klog.Infof("Waiting for pod with label %q in ns %q ...", selector, ns)
lastKnownPodNumber := -1
f := func() (bool, error) {
listOpts := meta.ListOptions{LabelSelector: selector}
pods, err := c.CoreV1().Pods(ns).List(listOpts)
if err != nil {
glog.Infof("temporary error: getting Pods with label selector %q : [%v]\n", selector, err)
klog.Infof("temporary error: getting Pods with label selector %q : [%v]\n", selector, err)
return false, nil
}

if lastKnownPodNumber != len(pods.Items) {
glog.Infof("Found %d Pods for label selector %s\n", len(pods.Items), selector)
klog.Infof("Found %d Pods for label selector %s\n", len(pods.Items), selector)
lastKnownPodNumber = len(pods.Items)
}

@ -93,7 +93,7 @@ func WaitForPods(c kubernetes.Interface, ns string, selector string, timeOut ...

for _, pod := range pods.Items {
if pod.Status.Phase != core.PodRunning && pod.Status.Phase != core.PodSucceeded {
glog.Infof("waiting for pod %q, current state: %s: [%v]\n", selector, pod.Status.Phase, err)
klog.Infof("waiting for pod %q, current state: %s: [%v]\n", selector, pod.Status.Phase, err)
return false, nil
}
}

@ -105,7 +105,7 @@ func WaitForPods(c kubernetes.Interface, ns string, selector string, timeOut ...
t = timeOut[0]
}
err := wait.PollImmediate(kconst.APICallRetryInterval, t, f)
glog.Infof("duration metric: took %s to wait for %s ...", time.Since(start), selector)
klog.Infof("duration metric: took %s to wait for %s ...", time.Since(start), selector)
return err
}

@ -135,7 +135,7 @@ func WaitForRCToStabilize(c kubernetes.Interface, ns, name string, timeout time.
*(rc.Spec.Replicas) == rc.Status.Replicas {
return true, nil
}
glog.Infof("Waiting for rc %s to stabilize, generation %v observed generation %v spec.replicas %d status.replicas %d",
klog.Infof("Waiting for rc %s to stabilize, generation %v observed generation %v spec.replicas %d status.replicas %d",
name, rc.Generation, rc.Status.ObservedGeneration, *(rc.Spec.Replicas), rc.Status.Replicas)
}
return false, nil

@ -168,7 +168,7 @@ func WaitForDeploymentToStabilize(c kubernetes.Interface, ns, name string, timeo
*(dp.Spec.Replicas) == dp.Status.Replicas {
return true, nil
}
glog.Infof("Waiting for deployment %s to stabilize, generation %v observed generation %v spec.replicas %d status.replicas %d",
klog.Infof("Waiting for deployment %s to stabilize, generation %v observed generation %v spec.replicas %d status.replicas %d",
name, dp.Generation, dp.Status.ObservedGeneration, *(dp.Spec.Replicas), dp.Status.Replicas)
}
return false, nil

@ -182,16 +182,16 @@ func WaitForService(c kubernetes.Interface, namespace, name string, exist bool,
_, err := c.CoreV1().Services(namespace).Get(name, meta.GetOptions{})
switch {
case err == nil:
glog.Infof("Service %s in namespace %s found.", name, namespace)
klog.Infof("Service %s in namespace %s found.", name, namespace)
return exist, nil
case apierr.IsNotFound(err):
glog.Infof("Service %s in namespace %s disappeared.", name, namespace)
klog.Infof("Service %s in namespace %s disappeared.", name, namespace)
return !exist, nil
case !IsRetryableAPIError(err):
glog.Info("Non-retryable failure while getting service.")
klog.Info("Non-retryable failure while getting service.")
return false, err
default:
glog.Infof("Get service %s in namespace %s failed: %v", name, namespace, err)
klog.Infof("Get service %s in namespace %s failed: %v", name, namespace, err)
return false, nil
}
})
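All of the waiters in this file share the wait.PollImmediate shape: the condition func returns (done, err), and returning (false, nil) means "not ready yet, poll again". A minimal sketch:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	tries := 0
	cond := func() (bool, error) {
		tries++
		return tries >= 3, nil // "pods ready" on the third poll
	}
	err := wait.PollImmediate(100*time.Millisecond, 5*time.Second, cond)
	fmt.Println(err, "tries:", tries) // <nil> tries: 3
}
```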
@ -25,8 +25,9 @@ import (
"path"
"time"

"github.com/golang/glog"
"github.com/pkg/errors"

"k8s.io/klog/v2"
)

// MemorySource is the source name used for in-memory copies

@ -92,7 +93,7 @@ func NewMemoryAssetTarget(d []byte, targetPath, permissions string) *MemoryAsset

// NewFileAsset creates a new FileAsset
func NewFileAsset(src, targetDir, targetName, permissions string) (*FileAsset, error) {
glog.V(4).Infof("NewFileAsset: %s -> %s", src, path.Join(targetDir, targetName))
klog.V(4).Infof("NewFileAsset: %s -> %s", src, path.Join(targetDir, targetName))

f, err := os.Open(src)
if err != nil {

@ -105,7 +106,7 @@ func NewFileAsset(src, targetDir, targetName, permissions string) (*FileAsset, e
}

if info.Size() == 0 {
glog.Warningf("NewFileAsset: %s is an empty file!", src)
klog.Warningf("NewFileAsset: %s is an empty file!", src)
}

return &FileAsset{

@ -123,7 +124,7 @@ func NewFileAsset(src, targetDir, targetName, permissions string) (*FileAsset, e
func (f *FileAsset) GetLength() (flen int) {
fi, err := os.Stat(f.SourcePath)
if err != nil {
glog.Errorf("stat(%q) failed: %v", f.SourcePath, err)
klog.Errorf("stat(%q) failed: %v", f.SourcePath, err)
return 0
}
return int(fi.Size())

@ -133,7 +134,7 @@ func (f *FileAsset) GetLength() (flen int) {
func (f *FileAsset) GetModTime() (time.Time, error) {
fi, err := os.Stat(f.SourcePath)
if err != nil {
glog.Errorf("stat(%q) failed: %v", f.SourcePath, err)
klog.Errorf("stat(%q) failed: %v", f.SourcePath, err)
return time.Time{}, err
}
return fi.ModTime(), nil

@ -248,7 +249,7 @@ func (m *BinAsset) loadData(isTemplate bool) error {

m.length = len(contents)
m.reader = bytes.NewReader(contents)
glog.V(1).Infof("Created asset %s with %d bytes", m.SourcePath, m.length)
klog.V(1).Infof("Created asset %s with %d bytes", m.SourcePath, m.length)
if m.length == 0 {
return fmt.Errorf("%s is an empty asset", m.SourcePath)
}
@ -24,9 +24,10 @@ import (
"runtime"
"strings"

"github.com/golang/glog"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"

"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"

@ -40,10 +41,10 @@ import (
func TransferBinaries(cfg config.KubernetesConfig, c command.Runner, sm sysinit.Manager) error {
ok, err := binariesExist(cfg, c)
if err == nil && ok {
glog.Info("Found k8s binaries, skipping transfer")
klog.Info("Found k8s binaries, skipping transfer")
return nil
}
glog.Infof("Didn't find k8s binaries: %v\nInitiating transfer...", err)
klog.Infof("Didn't find k8s binaries: %v\nInitiating transfer...", err)

dir := binRoot(cfg.KubernetesVersion)
_, err = c.RunCmd(exec.Command("sudo", "mkdir", "-p", dir))

@ -62,7 +63,7 @@ func TransferBinaries(cfg config.KubernetesConfig, c command.Runner, sm sysinit.

if name == "kubelet" && sm.Active(name) {
if err := sm.ForceStop(name); err != nil {
glog.Errorf("unable to stop kubelet: %v", err)
klog.Errorf("unable to stop kubelet: %v", err)
}
}
@ -23,8 +23,8 @@ import (
"strings"

"github.com/blang/semver"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/config"
)

@ -121,7 +121,7 @@ func extraConfigForComponent(component string, opts config.ExtraOptionSlice, ver
for _, opt := range opts {
if opt.Component == component {
if val, ok := versionedOpts[opt.Key]; ok {
glog.Infof("Overwriting default %s=%s with user provided %s=%s for component %s", opt.Key, val, opt.Key, opt.Value, component)
klog.Infof("Overwriting default %s=%s with user provided %s=%s for component %s", opt.Key, val, opt.Key, opt.Value, component)
}
versionedOpts[opt.Key] = opt.Value
}
@ -23,8 +23,8 @@ import (
"path"

"github.com/blang/semver"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/ktmpl"
"k8s.io/minikube/pkg/minikube/cni"
"k8s.io/minikube/pkg/minikube/config"

@ -81,7 +81,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana
if overrideCIDR != "" {
podCIDR = overrideCIDR
}
glog.Infof("Using pod CIDR: %s", podCIDR)
klog.Infof("Using pod CIDR: %s", podCIDR)

opts := struct {
CertDir string

@ -147,11 +147,11 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana
if version.GTE(semver.MustParse("1.17.0")) {
configTmpl = ktmpl.V1Beta2
}
glog.Infof("kubeadm options: %+v", opts)
klog.Infof("kubeadm options: %+v", opts)
if err := configTmpl.Execute(&b, opts); err != nil {
return nil, err
}
glog.Infof("kubeadm config:\n%s\n", b.String())
klog.Infof("kubeadm config:\n%s\n", b.String())
return b.Bytes(), nil
}
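The template choice above is a plain semver gate. A tiny sketch of the same comparison with github.com/blang/semver:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	v := semver.MustParse("1.19.2")
	tmpl := "v1beta1"
	if v.GTE(semver.MustParse("1.17.0")) {
		tmpl = "v1beta2" // matches the ktmpl.V1Beta2 branch above
	}
	fmt.Println(tmpl)
}
```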
@ -30,11 +30,11 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/docker/machine/libmachine/state"
|
||||
"github.com/golang/glog"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/version"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog/v2"
|
||||
kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
|
||||
"k8s.io/minikube/pkg/minikube/bootstrapper"
|
||||
"k8s.io/minikube/pkg/minikube/command"
|
||||
|
@ -45,7 +45,7 @@ import (
|
|||
|
||||
// WaitForAPIServerProcess waits for api server to be healthy returns error if it doesn't
|
||||
func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, start time.Time, timeout time.Duration) error {
|
||||
glog.Infof("waiting for apiserver process to appear ...")
|
||||
klog.Infof("waiting for apiserver process to appear ...")
|
||||
err := wait.PollImmediate(time.Millisecond*500, timeout, func() (bool, error) {
|
||||
if time.Since(start) > timeout {
|
||||
return false, fmt.Errorf("cluster wait timed out during process check")
|
||||
|
@ -65,7 +65,7 @@ func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, c
|
|||
if err != nil {
|
||||
return fmt.Errorf("apiserver process never appeared")
|
||||
}
|
||||
glog.Infof("duration metric: took %s to wait for apiserver process to appear ...", time.Since(start))
|
||||
klog.Infof("duration metric: took %s to wait for apiserver process to appear ...", time.Since(start))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@@ -81,7 +81,7 @@ func apiServerPID(cr command.Runner) (int, error) {
 
 // WaitForHealthyAPIServer waits for api server status to be running
 func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, client *kubernetes.Clientset, start time.Time, hostname string, port int, timeout time.Duration) error {
-	glog.Infof("waiting for apiserver healthz status ...")
+	klog.Infof("waiting for apiserver healthz status ...")
 	hStart := time.Now()
 
 	healthz := func() (bool, error) {

@@ -96,7 +96,7 @@ func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, c
 
 		status, err := apiServerHealthzNow(hostname, port)
 		if err != nil {
-			glog.Warningf("status: %v", err)
+			klog.Warningf("status: %v", err)
 			return false, nil
 		}
 		if status != state.Running {

@@ -114,7 +114,7 @@ func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, c
 			return false, fmt.Errorf("cluster wait timed out during version check")
 		}
 		if err := APIServerVersionMatch(client, cfg.KubernetesConfig.KubernetesVersion); err != nil {
-			glog.Warningf("api server version match failed: %v", err)
+			klog.Warningf("api server version match failed: %v", err)
 			return false, nil
 		}
 		return true, nil

@@ -124,7 +124,7 @@ func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, c
 		return fmt.Errorf("controlPlane never updated to %s", cfg.KubernetesConfig.KubernetesVersion)
 	}
 
-	glog.Infof("duration metric: took %s to wait for apiserver health ...", time.Since(hStart))
+	klog.Infof("duration metric: took %s to wait for apiserver health ...", time.Since(hStart))
 	return nil
 }

@@ -134,7 +134,7 @@ func APIServerVersionMatch(client *kubernetes.Clientset, expected string) error
 	if err != nil {
 		return errors.Wrap(err, "server version")
 	}
-	glog.Infof("control plane version: %s", vi)
+	klog.Infof("control plane version: %s", vi)
 	if version.CompareKubeAwareVersionStrings(vi.String(), expected) != 0 {
 		return fmt.Errorf("controlPane = %q, expected: %q", vi.String(), expected)
 	}

@@ -143,26 +143,26 @@ func APIServerVersionMatch(client *kubernetes.Clientset, expected string) error
 
 // APIServerStatus returns apiserver status in libmachine style state.State
 func APIServerStatus(cr command.Runner, hostname string, port int) (state.State, error) {
-	glog.Infof("Checking apiserver status ...")
+	klog.Infof("Checking apiserver status ...")
 
 	pid, err := apiServerPID(cr)
 	if err != nil {
-		glog.Warningf("stopped: unable to get apiserver pid: %v", err)
+		klog.Warningf("stopped: unable to get apiserver pid: %v", err)
 		return state.Stopped, nil
 	}
 
 	// Get the freezer cgroup entry for this pid
 	rr, err := cr.RunCmd(exec.Command("sudo", "egrep", "^[0-9]+:freezer:", fmt.Sprintf("/proc/%d/cgroup", pid)))
 	if err != nil {
-		glog.Warningf("unable to find freezer cgroup: %v", err)
+		klog.Warningf("unable to find freezer cgroup: %v", err)
 		return apiServerHealthz(hostname, port)
 
 	}
 	freezer := strings.TrimSpace(rr.Stdout.String())
-	glog.Infof("apiserver freezer: %q", freezer)
+	klog.Infof("apiserver freezer: %q", freezer)
 	fparts := strings.Split(freezer, ":")
 	if len(fparts) != 3 {
-		glog.Warningf("unable to parse freezer - found %d parts: %s", len(fparts), freezer)
+		klog.Warningf("unable to parse freezer - found %d parts: %s", len(fparts), freezer)
 		return apiServerHealthz(hostname, port)
 	}

@@ -172,16 +172,16 @@ func APIServerStatus(cr command.Runner, hostname string, port int) (state.State,
 		// cat: /sys/fs/cgroup/freezer/actions_job/e62ef4349cc5a70f4b49f8a150ace391da6ad6df27073c83ecc03dbf81fde1ce/kubepods/burstable/poda1de58db0ce81d19df7999f6808def1b/5df53230fe3483fd65f341923f18a477fda92ae9cd71061168130ef164fe479c/freezer.state: No such file or directory\n"*
 		// TODO: #7770 investigate how to handle this error better.
 		if strings.Contains(rr.Stderr.String(), "freezer.state: No such file or directory\n") {
-			glog.Infof("unable to get freezer state (might be okay and be related to #770): %s", rr.Stderr.String())
+			klog.Infof("unable to get freezer state (might be okay and be related to #770): %s", rr.Stderr.String())
 		} else {
-			glog.Warningf("unable to get freezer state: %s", rr.Stderr.String())
+			klog.Warningf("unable to get freezer state: %s", rr.Stderr.String())
 		}
 
 		return apiServerHealthz(hostname, port)
 	}
 
 	fs := strings.TrimSpace(rr.Stdout.String())
-	glog.Infof("freezer state: %q", fs)
+	klog.Infof("freezer state: %q", fs)
 	if fs == "FREEZING" || fs == "FROZEN" {
 		return state.Paused, nil
 	}

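A small self-contained sketch (not part of the diff) of the freezer-cgroup parsing used above: a /proc/PID/cgroup line like "7:freezer:/kubepods/..." splits into exactly three colon-separated parts, and the third part locates the freezer.state file whose FROZEN/FREEZING value marks a paused apiserver:

package main

import (
	"fmt"
	"strings"
)

func freezerStatePath(cgroupLine string) (string, error) {
	fparts := strings.Split(strings.TrimSpace(cgroupLine), ":")
	if len(fparts) != 3 {
		return "", fmt.Errorf("unable to parse freezer - found %d parts: %s", len(fparts), cgroupLine)
	}
	// hierarchy-id : controller : path relative to the freezer mount
	return "/sys/fs/cgroup/freezer" + fparts[2] + "/freezer.state", nil
}

func main() {
	p, err := freezerStatePath("7:freezer:/kubepods/burstable/pod123")
	fmt.Println(p, err)
}
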
@@ -218,7 +218,7 @@ func apiServerHealthz(hostname string, port int) (state.State, error) {
 // apiServerHealthzNow hits the /healthz endpoint and returns libmachine style state.State
 func apiServerHealthzNow(hostname string, port int) (state.State, error) {
 	url := fmt.Sprintf("https://%s/healthz", net.JoinHostPort(hostname, fmt.Sprint(port)))
-	glog.Infof("Checking apiserver healthz at %s ...", url)
+	klog.Infof("Checking apiserver healthz at %s ...", url)
 	// To avoid: x509: certificate signed by unknown authority
 	tr := &http.Transport{
 		Proxy: nil, // Avoid using a proxy to speak to a local host

@@ -228,17 +228,17 @@ func apiServerHealthzNow(hostname string, port int) (state.State, error) {
 	resp, err := client.Get(url)
 	// Connection refused, usually.
 	if err != nil {
-		glog.Infof("stopped: %s: %v", url, err)
+		klog.Infof("stopped: %s: %v", url, err)
 		return state.Stopped, nil
 	}
 
 	defer resp.Body.Close()
 	body, err := ioutil.ReadAll(resp.Body)
 	if err != nil {
-		glog.Warningf("unable to read response body: %s", err)
+		klog.Warningf("unable to read response body: %s", err)
 	}
 
-	glog.Infof("%s returned %d:\n%s", url, resp.StatusCode, body)
+	klog.Infof("%s returned %d:\n%s", url, resp.StatusCode, body)
 	if resp.StatusCode == http.StatusUnauthorized {
 		return state.Error, fmt.Errorf("%s returned code %d (unauthorized). Check your apiserver authorization settings:\n%s", url, resp.StatusCode, body)
 	}

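A hedged sketch (not minikube's exact code) of the healthz-probe shape used in apiServerHealthzNow: hit /healthz over HTTPS with certificate verification disabled, since the apiserver cert is signed by minikube's own CA, and with no proxy for the local host, then map the outcome to a coarse state string:

package main

import (
	"crypto/tls"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"time"
)

func healthz(hostname string, port int) (string, error) {
	url := fmt.Sprintf("https://%s/healthz", net.JoinHostPort(hostname, fmt.Sprint(port)))
	tr := &http.Transport{
		Proxy:           nil, // avoid using a proxy to speak to a local host
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	client := &http.Client{Transport: tr, Timeout: 5 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return "Stopped", nil // connection refused, usually
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	if resp.StatusCode != http.StatusOK {
		return "Error", fmt.Errorf("%s returned %d:\n%s", url, resp.StatusCode, body)
	}
	return "Running", nil
}

func main() {
	st, err := healthz("127.0.0.1", 8443)
	fmt.Println(st, err)
}
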
@@ -20,28 +20,28 @@ package kverify
 import (
 	"time"
 
-	"github.com/golang/glog"
 	"github.com/pkg/errors"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/klog/v2"
 	kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
 )
 
 // WaitForDefaultSA waits for the default service account to be created.
 func WaitForDefaultSA(cs *kubernetes.Clientset, timeout time.Duration) error {
-	glog.Info("waiting for default service account to be created ...")
+	klog.Info("waiting for default service account to be created ...")
 	start := time.Now()
 	saReady := func() (bool, error) {
 		// equivalent to manual check of 'kubectl --context profile get serviceaccount default'
 		sas, err := cs.CoreV1().ServiceAccounts("default").List(meta.ListOptions{})
 		if err != nil {
-			glog.Infof("temproary error waiting for default SA: %v", err)
+			klog.Infof("temproary error waiting for default SA: %v", err)
 			return false, nil
 		}
 		for _, sa := range sas.Items {
 			if sa.Name == "default" {
-				glog.Infof("found service account: %q", sa.Name)
+				klog.Infof("found service account: %q", sa.Name)
 				return true, nil
 			}
 		}

@@ -51,6 +51,6 @@ func WaitForDefaultSA(cs *kubernetes.Clientset, timeout time.Duration) error {
 		return errors.Wrapf(err, "waited %s for SA", time.Since(start))
 	}
 
-	glog.Infof("duration metric: took %s for default service account to be created ...", time.Since(start))
+	klog.Infof("duration metric: took %s for default service account to be created ...", time.Since(start))
 	return nil
 }

@@ -35,6 +35,8 @@ const (
 	AppsRunningKey = "apps_running"
 	// NodeReadyKey is the name used in the flags for waiting for the node status to be ready
 	NodeReadyKey = "node_ready"
+	// KubeletKey is the name used in the flags for waiting for the kubelet status to be ready
+	KubeletKey = "kubelet"
 )
 
 // vars related to the --wait flag

@@ -42,13 +44,13 @@ var (
 	// DefaultComponents is map of the default components to wait for
 	DefaultComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true}
 	// NoComponents is map of components to wait for if specified 'none' or 'false'
-	NoComponents = map[string]bool{APIServerWaitKey: false, SystemPodsWaitKey: false, DefaultSAWaitKey: false, AppsRunningKey: false, NodeReadyKey: false}
+	NoComponents = map[string]bool{APIServerWaitKey: false, SystemPodsWaitKey: false, DefaultSAWaitKey: false, AppsRunningKey: false, NodeReadyKey: false, KubeletKey: false}
 	// AllComponents is map for waiting for all components.
-	AllComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true, DefaultSAWaitKey: true, AppsRunningKey: true}
+	AllComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true, DefaultSAWaitKey: true, AppsRunningKey: true, KubeletKey: true}
 	// DefaultWaitList is list of all default components to wait for. only names to be used for start flags.
 	DefaultWaitList = []string{APIServerWaitKey, SystemPodsWaitKey}
 	// AllComponentsList list of all valid components keys to wait for. only names to be used for start flags.
-	AllComponentsList = []string{APIServerWaitKey, SystemPodsWaitKey, DefaultSAWaitKey, AppsRunningKey, NodeReadyKey}
+	AllComponentsList = []string{APIServerWaitKey, SystemPodsWaitKey, DefaultSAWaitKey, AppsRunningKey, NodeReadyKey, KubeletKey}
 	// AppsRunningList running list are valid k8s-app components to wait for them to be running
 	AppsRunningList = []string{
 		"kube-dns", // coredns

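A hypothetical sketch (not minikube's real flag parsing, which lives elsewhere) of how a --wait flag value could be resolved against component maps like the ones above: the special values map to the preset maps, anything else is treated as a comma-separated component list:

package main

import (
	"fmt"
	"strings"
)

var (
	defaultComponents = map[string]bool{"apiserver": true, "system_pods": true}
	noComponents      = map[string]bool{"apiserver": false, "system_pods": false, "kubelet": false}
)

func resolveWait(flagValue string) map[string]bool {
	switch flagValue {
	case "", "true":
		return defaultComponents
	case "none", "false":
		return noComponents
	}
	// otherwise treat the value as a comma-separated list of component keys
	out := map[string]bool{}
	for _, c := range strings.Split(flagValue, ",") {
		out[strings.TrimSpace(c)] = true
	}
	return out
}

func main() {
	fmt.Println(resolveWait("apiserver,kubelet")) // map[apiserver:true kubelet:true]
}
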
@@ -21,11 +21,11 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/golang/glog"
 	"github.com/pkg/errors"
 	v1 "k8s.io/api/core/v1"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/klog/v2"
 	kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
 	"k8s.io/minikube/pkg/util/retry"
 )

@@ -98,10 +98,10 @@ func (e *ErrNetworkNotReady) Error() string {
 
 // NodePressure verifies that node is not under disk, memory, pid or network pressure.
 func NodePressure(cs *kubernetes.Clientset) error {
-	glog.Info("verifying NodePressure condition ...")
+	klog.Info("verifying NodePressure condition ...")
 	start := time.Now()
 	defer func() {
-		glog.Infof("duration metric: took %s to run NodePressure ...", time.Since(start))
+		klog.Infof("duration metric: took %s to run NodePressure ...", time.Since(start))
 	}()
 
 	var ns *v1.NodeList

@@ -118,8 +118,8 @@ func NodePressure(cs *kubernetes.Clientset) error {
 	}
 
 	for _, n := range ns.Items {
-		glog.Infof("node storage ephemeral capacity is %s", n.Status.Capacity.StorageEphemeral())
-		glog.Infof("node cpu capacity is %s", n.Status.Capacity.Cpu().AsDec())
+		klog.Infof("node storage ephemeral capacity is %s", n.Status.Capacity.StorageEphemeral())
+		klog.Infof("node cpu capacity is %s", n.Status.Capacity.Cpu().AsDec())
 		for _, c := range n.Status.Conditions {
 			pc := NodeCondition{Type: c.Type, Status: c.Status, Reason: c.Reason, Message: c.Message}
 			if pc.DiskPressure() {

@@ -21,21 +21,21 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/golang/glog"
 	"github.com/pkg/errors"
 	v1 "k8s.io/api/core/v1"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/klog/v2"
 	kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
 )
 
 // WaitForNodeReady waits till kube client reports node status as "ready"
 func WaitForNodeReady(cs *kubernetes.Clientset, timeout time.Duration) error {
-	glog.Infof("waiting %s for node status to be ready ...", timeout)
+	klog.Infof("waiting %s for node status to be ready ...", timeout)
 	start := time.Now()
 	defer func() {
-		glog.Infof("duration metric: took %s to wait for WaitForNodeReady...", time.Since(start))
+		klog.Infof("duration metric: took %s to wait for WaitForNodeReady...", time.Since(start))
 	}()
 	checkReady := func() (bool, error) {
 		if time.Since(start) > timeout {

@@ -43,14 +43,14 @@ func WaitForNodeReady(cs *kubernetes.Clientset, timeout time.Duration) error {
 		}
 		ns, err := cs.CoreV1().Nodes().List(meta.ListOptions{})
 		if err != nil {
-			glog.Infof("error listing nodes will retry: %v", err)
+			klog.Infof("error listing nodes will retry: %v", err)
 			return false, nil
 		}
 
 		for _, n := range ns.Items {
 			for _, c := range n.Status.Conditions {
 				if c.Type == v1.NodeReady && c.Status != v1.ConditionTrue {
-					glog.Infof("node %q has unwanted condition %q : Reason %q Message: %q. will try. ", n.Name, c.Type, c.Reason, c.Message)
+					klog.Infof("node %q has unwanted condition %q : Reason %q Message: %q. will try. ", n.Name, c.Type, c.Reason, c.Message)
 					return false, nil
 				}
 			}

@@ -22,25 +22,23 @@ import (
 	"strings"
 	"time"
 
 	"github.com/docker/machine/libmachine/state"
-	"github.com/golang/glog"
 	"github.com/pkg/errors"
 	core "k8s.io/api/core/v1"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/klog/v2"
 	kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
 	"k8s.io/minikube/pkg/minikube/bootstrapper"
 	"k8s.io/minikube/pkg/minikube/command"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/cruntime"
 	"k8s.io/minikube/pkg/minikube/logs"
 	"k8s.io/minikube/pkg/minikube/sysinit"
 	"k8s.io/minikube/pkg/util/retry"
 )
 
 // WaitForSystemPods verifies essential pods for running Kubernetes are running
 func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, client *kubernetes.Clientset, start time.Time, timeout time.Duration) error {
-	glog.Info("waiting for kube-system pods to appear ...")
+	klog.Info("waiting for kube-system pods to appear ...")
 	pStart := time.Now()
 
 	podList := func() error {

@@ -52,13 +50,13 @@ func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg con
 		// Wait for any system pod, as waiting for apiserver may block until etcd
 		pods, err := client.CoreV1().Pods("kube-system").List(meta.ListOptions{})
 		if err != nil {
-			glog.Warningf("pod list returned error: %v", err)
+			klog.Warningf("pod list returned error: %v", err)
 			return err
 		}
 
-		glog.Infof("%d kube-system pods found", len(pods.Items))
+		klog.Infof("%d kube-system pods found", len(pods.Items))
 		for _, pod := range pods.Items {
-			glog.Infof(podStatusMsg(pod))
+			klog.Infof(podStatusMsg(pod))
 		}
 
 		if len(pods.Items) < 2 {

@@ -71,7 +69,7 @@ func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg con
 	if err := retry.Local(podList, timeout); err != nil {
 		return fmt.Errorf("apiserver never returned a pod list")
 	}
-	glog.Infof("duration metric: took %s to wait for pod list to return data ...", time.Since(pStart))
+	klog.Infof("duration metric: took %s to wait for pod list to return data ...", time.Since(pStart))
 	return nil
 }

@@ -83,10 +81,10 @@ func ExpectAppsRunning(cs *kubernetes.Clientset, expected []string) error {
 	if err != nil {
 		return err
 	}
-	glog.Infof("%d kube-system pods found", len(pods.Items))
+	klog.Infof("%d kube-system pods found", len(pods.Items))
 
 	for _, pod := range pods.Items {
-		glog.Infof(podStatusMsg(pod))
+		klog.Infof(podStatusMsg(pod))
 
 		if pod.Status.Phase != core.PodRunning {
 			continue

@@ -113,7 +111,7 @@ func ExpectAppsRunning(cs *kubernetes.Clientset, expected []string) error {
 
 // WaitForAppsRunning waits for expected Apps To be running
 func WaitForAppsRunning(cs *kubernetes.Clientset, expected []string, timeout time.Duration) error {
-	glog.Info("waiting for k8s-apps to be running ...")
+	klog.Info("waiting for k8s-apps to be running ...")
 	start := time.Now()
 
 	checkRunning := func() error {

@@ -123,7 +121,7 @@ func WaitForAppsRunning(cs *kubernetes.Clientset, expected []string, timeout tim
 	if err := retry.Local(checkRunning, timeout); err != nil {
 		return errors.Wrapf(err, "expected k8s-apps")
 	}
-	glog.Infof("duration metric: took %s to wait for k8s-apps to be running ...", time.Since(start))
+	klog.Infof("duration metric: took %s to wait for k8s-apps to be running ...", time.Since(start))
 	return nil
 }

@@ -155,13 +153,3 @@ func announceProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg conf
 		time.Sleep(kconst.APICallRetryInterval * 15)
 	}
 }
-
-// KubeletStatus checks the kubelet status
-func KubeletStatus(cr command.Runner) state.State {
-	glog.Infof("Checking kubelet status ...")
-	active := sysinit.New(cr).Active("kubelet")
-	if active {
-		return state.Running
-	}
-	return state.Stopped
-}

@@ -0,0 +1,60 @@
+/*
+Copyright 2020 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package kverify verifies a running Kubernetes cluster is healthy
+package kverify
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/docker/machine/libmachine/state"
+	"k8s.io/klog/v2"
+	"k8s.io/minikube/pkg/minikube/command"
+	"k8s.io/minikube/pkg/minikube/sysinit"
+	"k8s.io/minikube/pkg/util/retry"
+)
+
+// ServiceStatus checks the status of a systemd or init.d service
+func ServiceStatus(cr command.Runner, svc string) state.State {
+	active := sysinit.New(cr).Active(svc)
+	if active {
+		return state.Running
+	}
+	return state.Stopped
+}
+
+// WaitForService will wait for a "systemd" or "init.d" service to be running on the node...
+// not to be confused with Kubernetes Services
+func WaitForService(cr command.Runner, svc string, timeout time.Duration) error {
+	pStart := time.Now()
+	klog.Infof("waiting for %s service to be running ....", svc)
+	kr := func() error {
+		if st := ServiceStatus(cr, svc); st != state.Running {
+			return fmt.Errorf("status %s", st)
+		}
+		return nil
+	}
+
+	if err := retry.Local(kr, timeout); err != nil {
+		return fmt.Errorf("not running: %s", err)
+	}
+
+	klog.Infof("duration metric: took %s WaitForService to wait for %s.", time.Since(pStart), svc)
+
+	return nil
+}

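The new file above builds WaitForService on minikube's retry.Local helper. A rough, self-contained sketch of that helper's assumed semantics (the real implementation lives in pkg/util/retry and may back off differently): retry a callback until it succeeds or the time budget runs out:

package main

import (
	"fmt"
	"time"
)

// retryLocal is an illustrative stand-in for retry.Local, not minikube's code.
func retryLocal(callback func() error, maxTime time.Duration) error {
	deadline := time.Now().Add(maxTime)
	wait := 100 * time.Millisecond
	for {
		err := callback()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return err // give up and surface the last error
		}
		time.Sleep(wait)
		if wait < 2*time.Second {
			wait *= 2 // simple exponential backoff
		}
	}
}

func main() {
	attempts := 0
	err := retryLocal(func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("status Stopped")
		}
		return nil
	}, 5*time.Second)
	fmt.Println(err, attempts) // <nil> 3
}
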
@@ -20,8 +20,8 @@ import (
 	"os/exec"
 	"strings"
 
-	"github.com/golang/glog"
 	"github.com/pkg/errors"
+	"k8s.io/klog/v2"
 	"k8s.io/minikube/pkg/minikube/command"
 )

@@ -31,12 +31,12 @@ func AdjustResourceLimits(c command.Runner) error {
 	if err != nil {
 		return errors.Wrapf(err, "oom_adj check cmd %s. ", rr.Command())
 	}
-	glog.Infof("apiserver oom_adj: %s", rr.Stdout.String())
+	klog.Infof("apiserver oom_adj: %s", rr.Stdout.String())
 	// oom_adj is already a negative number
 	if strings.HasPrefix(rr.Stdout.String(), "-") {
 		return nil
 	}
-	glog.Infof("adjusting apiserver oom_adj to -10")
+	klog.Infof("adjusting apiserver oom_adj to -10")
 
 	// Prevent the apiserver from OOM'ing before other pods, as it is our gateway into the cluster.
 	// It'd be preferable to do this via Kubernetes, but kubeadm doesn't have a way to set pod QoS.

@@ -29,12 +29,12 @@ import (
 	"sort"
 	"strings"
 
-	"github.com/golang/glog"
 	"github.com/otiai10/copy"
 	"github.com/pkg/errors"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/tools/clientcmd/api"
 	"k8s.io/client-go/tools/clientcmd/api/latest"
+	"k8s.io/klog/v2"
 	"k8s.io/minikube/pkg/drivers/kic/oci"
 	"k8s.io/minikube/pkg/minikube/assets"
 	"k8s.io/minikube/pkg/minikube/command"

@@ -49,7 +49,7 @@ import (
 // SetupCerts gets the generated credentials required to talk to the APIServer.
 func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) ([]assets.CopyableFile, error) {
 	localPath := localpath.Profile(k8s.ClusterName)
-	glog.Infof("Setting up %s for IP: %s\n", localPath, n.IP)
+	klog.Infof("Setting up %s for IP: %s\n", localPath, n.IP)
 
 	ccs, err := generateSharedCACerts()
 	if err != nil {

@@ -166,11 +166,11 @@ func generateSharedCACerts() (CACerts, error) {
 
 	for _, ca := range caCertSpecs {
 		if canRead(ca.certPath) && canRead(ca.keyPath) {
-			glog.Infof("skipping %s CA generation: %s", ca.subject, ca.keyPath)
+			klog.Infof("skipping %s CA generation: %s", ca.subject, ca.keyPath)
 			continue
 		}
 
-		glog.Infof("generating %s CA: %s", ca.subject, ca.keyPath)
+		klog.Infof("generating %s CA: %s", ca.subject, ca.keyPath)
 		if err := util.GenerateCACert(ca.certPath, ca.keyPath, ca.subject); err != nil {
 			return cc, errors.Wrap(err, "generate ca cert")
 		}

@@ -266,11 +266,11 @@ func generateProfileCerts(k8s config.KubernetesConfig, n config.Node, ccs CACert
 	}
 
 		if canRead(cp) && canRead(kp) {
-			glog.Infof("skipping %s signed cert generation: %s", spec.subject, kp)
+			klog.Infof("skipping %s signed cert generation: %s", spec.subject, kp)
 			continue
 		}
 
-		glog.Infof("generating %s signed cert: %s", spec.subject, kp)
+		klog.Infof("generating %s signed cert: %s", spec.subject, kp)
 		err := util.GenerateSignedCert(
 			cp, kp, spec.subject,
 			spec.ips, spec.alternateNames,

@@ -281,11 +281,11 @@ func generateProfileCerts(k8s config.KubernetesConfig, n config.Node, ccs CACert
 		}
 
 		if spec.hash != "" {
-			glog.Infof("copying %s -> %s", cp, spec.certPath)
+			klog.Infof("copying %s -> %s", cp, spec.certPath)
 			if err := copy.Copy(cp, spec.certPath); err != nil {
 				return xfer, errors.Wrap(err, "copy cert")
 			}
-			glog.Infof("copying %s -> %s", kp, spec.keyPath)
+			klog.Infof("copying %s -> %s", kp, spec.keyPath)
 			if err := copy.Copy(kp, spec.keyPath); err != nil {
 				return xfer, errors.Wrap(err, "copy key")
 			}

@@ -341,11 +341,11 @@ func collectCACerts() (map[string]string, error) {
 
 			if ext == ".crt" || ext == ".pem" {
 				if info.Size() < 32 {
-					glog.Warningf("ignoring %s, impossibly tiny %d bytes", fullPath, info.Size())
+					klog.Warningf("ignoring %s, impossibly tiny %d bytes", fullPath, info.Size())
 					return nil
 				}
 
-				glog.Infof("found cert: %s (%d bytes)", fullPath, info.Size())
+				klog.Infof("found cert: %s (%d bytes)", fullPath, info.Size())
 
 				validPem, err := isValidPEMCertificate(hostpath)
 				if err != nil {

@@ -386,7 +386,7 @@ func getSubjectHash(cr command.Runner, filePath string) (string, error) {
 	if err != nil {
 		return "", err
 	}
-	glog.Infof("hashing: %s", lrr.Stdout.String())
+	klog.Infof("hashing: %s", lrr.Stdout.String())
 
 	rr, err := cr.RunCmd(exec.Command("openssl", "x509", "-hash", "-noout", "-in", filePath))
 	if err != nil {

@@ -407,7 +407,7 @@ func installCertSymlinks(cr command.Runner, caCerts map[string]string) error {
 	}
 
 	if !hasSSLBinary && len(caCerts) > 0 {
-		glog.Warning("OpenSSL not found. Please recreate the cluster with the latest minikube ISO.")
+		klog.Warning("OpenSSL not found. Please recreate the cluster with the latest minikube ISO.")
 	}
 
 	for _, caCertFile := range caCerts {

@@ -33,10 +33,10 @@ import (
 	"github.com/blang/semver"
 	"github.com/docker/machine/libmachine"
 	"github.com/docker/machine/libmachine/state"
-	"github.com/golang/glog"
 	"github.com/pkg/errors"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/klog/v2"
 	kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
 	"k8s.io/minikube/pkg/drivers/kic/oci"
 	"k8s.io/minikube/pkg/kapi"

@@ -119,10 +119,10 @@ func (k *Bootstrapper) createCompatSymlinks() error {
 	legacyEtcd := "/data/minikube"
 
 	if _, err := k.c.RunCmd(exec.Command("sudo", "test", "-d", legacyEtcd)); err != nil {
-		glog.Infof("%s skipping compat symlinks: %v", legacyEtcd, err)
+		klog.Infof("%s skipping compat symlinks: %v", legacyEtcd, err)
 		return nil
 	}
-	glog.Infof("Found %s, creating compatibility symlinks ...", legacyEtcd)
+	klog.Infof("Found %s, creating compatibility symlinks ...", legacyEtcd)
 
 	c := exec.Command("sudo", "ln", "-s", legacyEtcd, bsutil.EtcdDataDir())
 	if rr, err := k.c.RunCmd(c); err != nil {

@@ -144,10 +144,10 @@ func (k *Bootstrapper) clearStaleConfigs(cfg config.ClusterConfig) error {
 	args := append([]string{"ls", "-la"}, paths...)
 	rr, err := k.c.RunCmd(exec.Command("sudo", args...))
 	if err != nil {
-		glog.Infof("config check failed, skipping stale config cleanup: %v", err)
+		klog.Infof("config check failed, skipping stale config cleanup: %v", err)
 		return nil
 	}
-	glog.Infof("found existing configuration files:\n%s\n", rr.Stdout.String())
+	klog.Infof("found existing configuration files:\n%s\n", rr.Stdout.String())
 
 	cp, err := config.PrimaryControlPlane(&cfg)
 	if err != nil {

@@ -158,11 +158,11 @@ func (k *Bootstrapper) clearStaleConfigs(cfg config.ClusterConfig) error {
 	for _, path := range paths {
 		_, err := k.c.RunCmd(exec.Command("sudo", "grep", endpoint, path))
 		if err != nil {
-			glog.Infof("%q may not be in %s - will remove: %v", endpoint, path, err)
+			klog.Infof("%q may not be in %s - will remove: %v", endpoint, path, err)
 
 			_, err := k.c.RunCmd(exec.Command("sudo", "rm", "-f", path))
 			if err != nil {
-				glog.Errorf("rm failed: %v", err)
+				klog.Errorf("rm failed: %v", err)
 			}
 		}
 	}

@@ -197,18 +197,18 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error {
 	skipSystemVerification := false
 	// Allow older kubeadm versions to function with newer Docker releases.
 	if version.LT(semver.MustParse("1.13.0")) {
-		glog.Infof("ignoring SystemVerification for kubeadm because of old Kubernetes version %v", version)
+		klog.Infof("ignoring SystemVerification for kubeadm because of old Kubernetes version %v", version)
 		skipSystemVerification = true
 	}
 	if driver.BareMetal(cfg.Driver) && r.Name() == "Docker" {
 		if v, err := r.Version(); err == nil && strings.Contains(v, "azure") {
-			glog.Infof("ignoring SystemVerification for kubeadm because of unknown docker version %s", v)
+			klog.Infof("ignoring SystemVerification for kubeadm because of unknown docker version %s", v)
 			skipSystemVerification = true
 		}
 	}
 	// For kic on linux example error: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.2.17-1rodete3-amd64"
 	if driver.IsKIC(cfg.Driver) {
-		glog.Infof("ignoring SystemVerification for kubeadm because of %s driver", cfg.Driver)
+		klog.Infof("ignoring SystemVerification for kubeadm because of %s driver", cfg.Driver)
 		skipSystemVerification = true
 	}
 	if skipSystemVerification {

@@ -249,21 +249,21 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error {
 	go func() {
 		// we need to have cluster role binding before applying overlay to avoid #7428
 		if err := k.elevateKubeSystemPrivileges(cfg); err != nil {
-			glog.Errorf("unable to create cluster role binding, some addons might not work: %v", err)
+			klog.Errorf("unable to create cluster role binding, some addons might not work: %v", err)
 		}
 		wg.Done()
 	}()
 
 	go func() {
 		if err := k.applyNodeLabels(cfg); err != nil {
-			glog.Warningf("unable to apply node labels: %v", err)
+			klog.Warningf("unable to apply node labels: %v", err)
 		}
 		wg.Done()
 	}()
 
 	go func() {
 		if err := bsutil.AdjustResourceLimits(k.c); err != nil {
-			glog.Warningf("unable to adjust resource limits: %v", err)
+			klog.Warningf("unable to adjust resource limits: %v", err)
 		}
 		wg.Done()
 	}()

@@ -321,18 +321,18 @@ func (k *Bootstrapper) unpause(cfg config.ClusterConfig) error {
 // StartCluster starts the cluster
 func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
 	start := time.Now()
-	glog.Infof("StartCluster: %+v", cfg)
+	klog.Infof("StartCluster: %+v", cfg)
 	defer func() {
-		glog.Infof("StartCluster complete in %s", time.Since(start))
+		klog.Infof("StartCluster complete in %s", time.Since(start))
 	}()
 
 	// Before we start, ensure that no paused components are lurking around
 	if err := k.unpause(cfg); err != nil {
-		glog.Warningf("unpause failed: %v", err)
+		klog.Warningf("unpause failed: %v", err)
 	}
 
 	if err := bsutil.ExistingConfig(k.c); err == nil {
-		glog.Infof("found existing configuration files, will attempt cluster restart")
+		klog.Infof("found existing configuration files, will attempt cluster restart")
 		rerr := k.restartControlPlane(cfg)
 		if rerr == nil {
 			return nil

@@ -340,7 +340,7 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
 
 		out.ErrT(style.Embarrassed, "Unable to restart cluster, will reset it: {{.error}}", out.V{"error": rerr})
 		if err := k.DeleteCluster(cfg.KubernetesConfig); err != nil {
-			glog.Warningf("delete failed: %v", err)
+			klog.Warningf("delete failed: %v", err)
 		}
 		// Fall-through to init
 	}

@@ -359,7 +359,7 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
 		if _, ff := err.(*FailFastError); !ff {
 			out.ErrT(style.Conflict, "initialization failed, will try again: {{.error}}", out.V{"error": err})
 			if err := k.DeleteCluster(cfg.KubernetesConfig); err != nil {
-				glog.Warningf("delete failed: %v", err)
+				klog.Warningf("delete failed: %v", err)
 			}
 			return k.init(cfg)
 		}

@@ -379,7 +379,7 @@ func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error
 
 	endpoint := fmt.Sprintf("https://%s", net.JoinHostPort(ip, strconv.Itoa(port)))
 	if cc.Host != endpoint {
-		glog.Warningf("Overriding stale ClientConfig host %s with %s", cc.Host, endpoint)
+		klog.Warningf("Overriding stale ClientConfig host %s with %s", cc.Host, endpoint)
 		cc.Host = endpoint
 	}
 	c, err := kubernetes.NewForConfig(cc)

@@ -392,10 +392,13 @@ func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error
 // WaitForNode blocks until the node appears to be healthy
 func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, timeout time.Duration) error {
 	start := time.Now()
 
 	register.Reg.SetStep(register.VerifyingKubernetes)
 	out.T(style.HealthCheck, "Verifying Kubernetes components...")
+	// regardless if waiting is set or not, we will make sure kubelet is not stopped
+	// to solve corner cases when a container is hibernated and once coming back kubelet not running.
+	if err := k.ensureServiceStarted("kubelet"); err != nil {
+		klog.Warningf("Couldn't ensure kubelet is started this might cause issues: %v", err)
+	}
 	// TODO: #7706: for better performance we could use k.client inside minikube to avoid asking for external IP:PORT
 	cp, err := config.PrimaryControlPlane(&cfg)
 	if err != nil {

@@ -412,7 +415,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
 	}
 
 	if !kverify.ShouldWait(cfg.VerifyComponents) {
-		glog.Infof("skip waiting for components based on config.")
+		klog.Infof("skip waiting for components based on config.")
 
 		if err := kverify.NodePressure(client); err != nil {
 			adviseNodePressure(err, cfg.Name, cfg.Driver)

@@ -455,6 +458,12 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
 			}
 		}
 	}
+	if cfg.VerifyComponents[kverify.KubeletKey] {
+		if err := kverify.WaitForService(k.c, "kubelet", timeout); err != nil {
+			return errors.Wrap(err, "waiting for kubelet")
+		}
+
+	}
 
 	if cfg.VerifyComponents[kverify.NodeReadyKey] {
 		if err := kverify.WaitForNodeReady(client, timeout); err != nil {

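The hunk above adds the kubelet entry to WaitForNode's dispatch: each key in cfg.VerifyComponents gates exactly one wait helper. A condensed, self-contained sketch (illustrative names only, not the diff's code) of that pattern:

package main

import (
	"fmt"
	"time"
)

func waitForKubelet(timeout time.Duration) error   { return nil } // stub for the sketch
func waitForNodeReady(timeout time.Duration) error { return nil } // stub for the sketch

func waitForComponents(verify map[string]bool, timeout time.Duration) error {
	if verify["kubelet"] {
		if err := waitForKubelet(timeout); err != nil {
			return fmt.Errorf("waiting for kubelet: %w", err)
		}
	}
	if verify["node_ready"] {
		if err := waitForNodeReady(timeout); err != nil {
			return fmt.Errorf("waiting for node ready: %w", err)
		}
	}
	return nil
}

func main() {
	err := waitForComponents(map[string]bool{"kubelet": true}, time.Minute)
	fmt.Println(err) // <nil>
}
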
@@ -462,7 +471,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
 		}
 	}
 
-	glog.Infof("duration metric: took %s to wait for : %+v ...", time.Since(start), cfg.VerifyComponents)
+	klog.Infof("duration metric: took %s to wait for : %+v ...", time.Since(start), cfg.VerifyComponents)
 
 	if err := kverify.NodePressure(client); err != nil {
 		adviseNodePressure(err, cfg.Name, cfg.Driver)

@@ -471,46 +480,55 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
 	return nil
 }
 
+// ensureServiceStarted will start a systemd or init.d service if it is not running.
+func (k *Bootstrapper) ensureServiceStarted(svc string) error {
+	if st := kverify.ServiceStatus(k.c, svc); st != state.Running {
+		klog.Warningf("surprisingly %q service status was %s!. will try to start it, could be related to this issue https://github.com/kubernetes/minikube/issues/9458", svc, st)
+		return sysinit.New(k.c).Start(svc)
+	}
+	return nil
+}
+
 // needsReconfigure returns whether or not the cluster needs to be reconfigured
 func (k *Bootstrapper) needsReconfigure(conf string, hostname string, port int, client *kubernetes.Clientset, version string) bool {
 	if rr, err := k.c.RunCmd(exec.Command("sudo", "diff", "-u", conf, conf+".new")); err != nil {
-		glog.Infof("needs reconfigure: configs differ:\n%s", rr.Output())
+		klog.Infof("needs reconfigure: configs differ:\n%s", rr.Output())
 		return true
 	}
 
 	st, err := kverify.APIServerStatus(k.c, hostname, port)
 	if err != nil {
-		glog.Infof("needs reconfigure: apiserver error: %v", err)
+		klog.Infof("needs reconfigure: apiserver error: %v", err)
 		return true
 	}
 
 	if st != state.Running {
-		glog.Infof("needs reconfigure: apiserver in state %s", st)
+		klog.Infof("needs reconfigure: apiserver in state %s", st)
 		return true
 	}
 
 	if err := kverify.ExpectAppsRunning(client, kverify.AppsRunningList); err != nil {
-		glog.Infof("needs reconfigure: %v", err)
+		klog.Infof("needs reconfigure: %v", err)
 		return true
 	}
 
 	if err := kverify.APIServerVersionMatch(client, version); err != nil {
-		glog.Infof("needs reconfigure: %v", err)
+		klog.Infof("needs reconfigure: %v", err)
 		return true
 	}
 
 	// DANGER: This log message is hard-coded in an integration test!
-	glog.Infof("The running cluster does not require reconfiguration: %s", hostname)
+	klog.Infof("The running cluster does not require reconfiguration: %s", hostname)
 	return false
 }
 
 // restartCluster restarts the Kubernetes cluster configured by kubeadm
 func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error {
-	glog.Infof("restartCluster start")
+	klog.Infof("restartCluster start")
 
 	start := time.Now()
 	defer func() {
-		glog.Infof("restartCluster took %s", time.Since(start))
+		klog.Infof("restartCluster took %s", time.Since(start))
 	}()
 
 	version, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion)

@@ -526,7 +544,7 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error {
 	}
 
 	if err := k.createCompatSymlinks(); err != nil {
-		glog.Errorf("failed to create compat symlinks: %v", err)
+		klog.Errorf("failed to create compat symlinks: %v", err)
 	}
 
 	cp, err := config.PrimaryControlPlane(&cfg)

@@ -542,7 +560,7 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error {
 	// Save the costly tax of reinstalling Kubernetes if the only issue is a missing kube context
 	_, err = kubeconfig.UpdateEndpoint(cfg.Name, hostname, port, kubeconfig.PathFromEnv())
 	if err != nil {
-		glog.Warningf("unable to update kubeconfig (cluster will likely require a reset): %v", err)
+		klog.Warningf("unable to update kubeconfig (cluster will likely require a reset): %v", err)
 	}
 
 	client, err := k.client(hostname, port)

@@ -553,16 +571,16 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error {
 	// If the cluster is running, check if we have any work to do.
 	conf := bsutil.KubeadmYamlPath
 	if !k.needsReconfigure(conf, hostname, port, client, cfg.KubernetesConfig.KubernetesVersion) {
-		glog.Infof("Taking a shortcut, as the cluster seems to be properly configured")
+		klog.Infof("Taking a shortcut, as the cluster seems to be properly configured")
 		return nil
 	}
 
 	if err := k.stopKubeSystem(cfg); err != nil {
-		glog.Warningf("Failed to stop kube-system containers: port conflicts may arise: %v", err)
+		klog.Warningf("Failed to stop kube-system containers: port conflicts may arise: %v", err)
 	}
 
 	if err := sysinit.New(k.c).Stop("kubelet"); err != nil {
-		glog.Warningf("Failed to stop kubelet, this might cause upgrade errors: %v", err)
+		klog.Warningf("Failed to stop kubelet, this might cause upgrade errors: %v", err)
 	}
 
 	if err := k.clearStaleConfigs(cfg); err != nil {

@@ -582,11 +600,11 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error {
 		fmt.Sprintf("%s phase etcd local --config %s", baseCmd, conf),
 	}
 
-	glog.Infof("reconfiguring cluster from %s", conf)
+	klog.Infof("reconfiguring cluster from %s", conf)
 	// Run commands one at a time so that it is easier to root cause failures.
 	for _, c := range cmds {
 		if _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", c)); err != nil {
-			glog.Errorf("%s failed - will try once more: %v", c, err)
+			klog.Errorf("%s failed - will try once more: %v", c, err)
 
 			if _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", c)); err != nil {
 				return errors.Wrap(err, "run")

@@ -627,12 +645,12 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error {
 		return err
 	}
 	if err = retry.Expo(addonPhase, 100*time.Microsecond, 30*time.Second); err != nil {
-		glog.Warningf("addon install failed, wil retry: %v", err)
+		klog.Warningf("addon install failed, wil retry: %v", err)
 		return errors.Wrap(err, "addons")
 	}
 
 	if err := bsutil.AdjustResourceLimits(k.c); err != nil {
-		glog.Warningf("unable to adjust resource limits: %v", err)
+		klog.Warningf("unable to adjust resource limits: %v", err)
 	}
 	return nil
 }

@@ -640,9 +658,9 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error {
 // JoinCluster adds a node to an existing cluster
 func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinCmd string) error {
 	start := time.Now()
-	glog.Infof("JoinCluster: %+v", cc)
+	klog.Infof("JoinCluster: %+v", cc)
 	defer func() {
-		glog.Infof("JoinCluster complete in %s", time.Since(start))
+		klog.Infof("JoinCluster complete in %s", time.Since(start))
 	}()
 
 	// Join the master by specifying its token

@@ -652,13 +670,13 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC
 	// reset first to clear any possibly existing state
 	_, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s reset -f", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion))))
 	if err != nil {
-		glog.Infof("kubeadm reset failed, continuing anyway: %v", err)
+		klog.Infof("kubeadm reset failed, continuing anyway: %v", err)
 	}
 
 	out, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", joinCmd))
 	if err != nil {
 		if strings.Contains(err.Error(), "status \"Ready\" already exists in the cluster") {
-			glog.Infof("Node %s already joined the cluster, skip failure.", n.Name)
+			klog.Infof("Node %s already joined the cluster, skip failure.", n.Name)
 		} else {
 			return errors.Wrapf(err, "cmd failed: %s\n%+v\n", joinCmd, out.Output())
 		}

@@ -720,21 +738,21 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error {
 
 	rr, derr := k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd))
 	if derr != nil {
-		glog.Warningf("%s: %v", rr.Command(), err)
+		klog.Warningf("%s: %v", rr.Command(), err)
 	}
 
 	if err := sysinit.New(k.c).ForceStop("kubelet"); err != nil {
-		glog.Warningf("stop kubelet: %v", err)
+		klog.Warningf("stop kubelet: %v", err)
 	}
 
 	containers, err := cr.ListContainers(cruntime.ListOptions{Namespaces: []string{"kube-system"}})
 	if err != nil {
-		glog.Warningf("unable to list kube-system containers: %v", err)
+		klog.Warningf("unable to list kube-system containers: %v", err)
 	}
 	if len(containers) > 0 {
-		glog.Warningf("found %d kube-system containers to stop", len(containers))
+		klog.Warningf("found %d kube-system containers to stop", len(containers))
 		if err := cr.StopContainers(containers); err != nil {
-			glog.Warningf("error stopping containers: %v", err)
+			klog.Warningf("error stopping containers: %v", err)
 		}
 	}

@@ -763,7 +781,7 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error {
 	}
 
 	if err := r.Preload(cfg.KubernetesConfig); err != nil {
-		glog.Infof("preload failed, will try to load cached images: %v", err)
+		klog.Infof("preload failed, will try to load cached images: %v", err)
 	}
 
 	if cfg.KubernetesConfig.ShouldLoadCachedImages {

@@ -802,7 +820,7 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru
 		return errors.Wrap(err, "generating kubelet service")
 	}
 
-	glog.Infof("kubelet %s config:\n%+v", kubeletCfg, cfg.KubernetesConfig)
+	klog.Infof("kubelet %s config:\n%+v", kubeletCfg, cfg.KubernetesConfig)
 
 	sm := sysinit.New(k.c)

@@ -878,7 +896,7 @@ func (k *Bootstrapper) applyNodeLabels(cfg config.ClusterConfig) error {
 func (k *Bootstrapper) elevateKubeSystemPrivileges(cfg config.ClusterConfig) error {
 	start := time.Now()
 	defer func() {
-		glog.Infof("duration metric: took %s to wait for elevateKubeSystemPrivileges.", time.Since(start))
+		klog.Infof("duration metric: took %s to wait for elevateKubeSystemPrivileges.", time.Since(start))
 	}()
 
 	// Allow no more than 5 seconds for creating cluster role bindings

@@ -896,7 +914,7 @@ func (k *Bootstrapper) elevateKubeSystemPrivileges(cfg config.ClusterConfig) err
 	}
 	// Error from server (AlreadyExists): clusterrolebindings.rbac.authorization.k8s.io "minikube-rbac" already exists
 	if strings.Contains(rr.Output(), "Error from server (AlreadyExists)") {
-		glog.Infof("rbac %q already exists not need to re-create.", rbacName)
+		klog.Infof("rbac %q already exists not need to re-create.", rbacName)
 	} else {
 		return errors.Wrapf(err, "apply sa")
 	}

@@ -925,7 +943,7 @@ func (k *Bootstrapper) elevateKubeSystemPrivileges(cfg config.ClusterConfig) err
 
 // stopKubeSystem stops all the containers in the kube-system to prevent #8740 when doing hot upgrade
 func (k *Bootstrapper) stopKubeSystem(cfg config.ClusterConfig) error {
-	glog.Info("stopping kube-system containers ...")
+	klog.Info("stopping kube-system containers ...")
 	cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c})
 	if err != nil {
 		return errors.Wrap(err, "new cruntime")

@@ -948,7 +966,7 @@ func (k *Bootstrapper) stopKubeSystem(cfg config.ClusterConfig) error {
 func adviseNodePressure(err error, name string, drv string) {
 	if diskErr, ok := err.(*kverify.ErrDiskPressure); ok {
 		out.ErrLn("")
-		glog.Warning(diskErr)
+		klog.Warning(diskErr)
 		out.WarningT("The node {{.name}} has ran out of disk space.", out.V{"name": name})
 		// generic advice for all drivers
 		out.T(style.Tip, "Please free up disk or prune images.")

@@ -969,7 +987,7 @@ func adviseNodePressure(err error, name string, drv string) {
 
 	if memErr, ok := err.(*kverify.ErrMemoryPressure); ok {
 		out.ErrLn("")
-		glog.Warning(memErr)
+		klog.Warning(memErr)
 		out.WarningT("The node {{.name}} has ran out of memory.", out.V{"name": name})
 		out.T(style.Tip, "Check if you have unnecessary pods running by running 'kubectl get po -A")
 		if driver.IsVM(drv) {

@@ -988,7 +1006,7 @@ func adviseNodePressure(err error, name string, drv string) {
 	}
 
 	if pidErr, ok := err.(*kverify.ErrPIDPressure); ok {
-		glog.Warning(pidErr)
+		klog.Warning(pidErr)
 		out.ErrLn("")
 		out.WarningT("The node {{.name}} has ran out of available PIDs.", out.V{"name": name})
 		out.ErrLn("")

@@ -996,7 +1014,7 @@ func adviseNodePressure(err error, name string, drv string) {
 	}
 
 	if netErr, ok := err.(*kverify.ErrNetworkNotReady); ok {
-		glog.Warning(netErr)
+		klog.Warning(netErr)
 		out.ErrLn("")
 		out.WarningT("The node {{.name}} network is not available. Please verify network settings.", out.V{"name": name})
 		out.ErrLn("")

@@ -17,7 +17,6 @@ limitations under the License.
 package cluster
 
 import (
-	"flag"
 	"fmt"
 
 	"github.com/docker/machine/libmachine"

@@ -29,19 +28,13 @@ import (
 	"k8s.io/minikube/pkg/minikube/command"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/driver"
-	"k8s.io/minikube/pkg/minikube/exit"
 	"k8s.io/minikube/pkg/minikube/machine"
-	"k8s.io/minikube/pkg/minikube/reason"
 )
 
 // This init function is used to set the logtostderr variable to false so that INFO level log info does not clutter the CLI
 // INFO lvl logging is displayed due to the Kubernetes api calling flag.Set("logtostderr", "true") in its init()
 // see: https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/util/logs/logs.go#L32-L34
 func init() {
-	if err := flag.Set("logtostderr", "false"); err != nil {
-		exit.Error(reason.InternalFlagSet, "unable to set logtostderr", err)
-	}
-
 	// Setting the default client to native gives much better performance.
 	ssh.SetDefaultClient(ssh.Native)
 }
Some files were not shown because too many files have changed in this diff.