Merge branch 'master' into f-fix-5144
commit 0d29a2ac7b

@@ -0,0 +1,144 @@ (new GitHub Actions workflow)
name: CI
on: [pull_request]
jobs:
  docker_ubuntu_16_04:
    runs-on: ubuntu-16.04
    steps:
    - uses: actions/checkout@v2
    - name: build binaries
      run : |
        make minikube-linux-amd64
        make e2e-linux-amd64
        mkdir -p report
    - name: install gopogh
      run: |
        cd /tmp
        GO111MODULE="on" go get github.com/medyagh/gopogh@v0.0.17 || true
        cd -
    - name: run integration test
      run: |
        mkdir -p /tmp/testhome
        MINIKUBE_HOME=/tmp/testhome ./out/e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -expected-default-driver= -test.timeout=70m -test.v -binary=out/minikube-linux-amd64 2>&1 | tee ./report/testout.txt
    - name: generate gopogh report
      run: |
        export PATH=${PATH}:`go env GOPATH`/bin
        go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
        gopogh -in ./report/testout.json -out ./report/testout.html -name "docker ubuntu" -repo github.com/kubernetes/minikube/ || true
    - uses: actions/upload-artifact@v1
      with:
        name: docker_on_ubuntu_16_04_report
        path: report
  docker_ubuntu_18_04:
    runs-on: ubuntu-18.04
    steps:
    - uses: actions/checkout@v2
    - name: build binaries
      run : |
        make minikube-linux-amd64
        make e2e-linux-amd64
        mkdir -p report
    - name: install gopogh
      run: |
        cd /tmp
        GO111MODULE="on" go get github.com/medyagh/gopogh@v0.0.17 || true
        cd -
    - name: run integration test
      run: |
        mkdir -p /tmp/testhome
        MINIKUBE_HOME=/tmp/testhome ./out/e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -expected-default-driver= -test.timeout=70m -test.v -binary=out/minikube-linux-amd64 2>&1 | tee ./report/testout.txt
    - name: generate gopogh report
      run: |
        export PATH=${PATH}:`go env GOPATH`/bin
        go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
        gopogh -in ./report/testout.json -out ./report/testout.html -name "docker ubuntu" -repo github.com/kubernetes/minikube/ || true
    - uses: actions/upload-artifact@v1
      with:
        name: docker_on_ubuntu_18_04_report
        path: report
  docker_macos:
    runs-on: macos-latest
    steps:
    - uses: actions/checkout@v2
    - name: build binaries
      run : |
        make minikube-darwin-amd64
        make e2e-darwin-amd64
        mkdir -p report
    - name: install docker
      run: |
        brew install docker-machine docker || true
        brew services start docker-machine || true
        docker version || true
    - name: install gopogh
      run: |
        cd /tmp
        GO111MODULE="on" go get github.com/medyagh/gopogh@v0.0.17 || true
        cd -
    - name: run integration test
      run: |
        mkdir -p /tmp/testhome
        MINIKUBE_HOME=/tmp/testhome ./out/e2e-darwin-amd64 -minikube-start-args=--vm-driver=docker -expected-default-driver= -test.timeout=70m -test.v -binary=./out/minikube-darwin-amd64 2>&1 | tee ./report/testout.txt
    - name: generate gopogh report
      run: |
        export PATH=${PATH}:`go env GOPATH`/bin
        go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
        gopogh -in ./report/testout.json -out ./report/testout.html -name "docker macos" -repo github.com/kubernetes/minikube/ || true
    - uses: actions/upload-artifact@v1
      with:
        name: docker_on_macos_report
        path: ./report
  none_ubuntu16_04:
    runs-on: ubuntu-16.04
    steps:
    - uses: actions/checkout@v2
    - name: build binaries
      run : |
        make minikube-linux-amd64
        make e2e-linux-amd64
        mkdir -p report
    - name: install gopogh
      run: |
        cd /tmp
        GO111MODULE="on" go get github.com/medyagh/gopogh@v0.0.17 || true
        cd -
    - name: run integration test
      run: |
        mkdir -p /tmp/testhome
        MINIKUBE_HOME=/tmp/testhome sudo -E ./out/e2e-linux-amd64 -minikube-start-args=--vm-driver=none -expected-default-driver= -test.timeout=70m -test.v -binary=out/minikube-linux-amd64 2>&1 | tee ./report/testout.txt
    - name: generate gopogh report
      run: |
        export PATH=${PATH}:`go env GOPATH`/bin
        go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
        gopogh -in ./report/testout.json -out ./report/testout.html -name "docker ubuntu" -repo github.com/kubernetes/minikube/ || true
    - uses: actions/upload-artifact@v1
      with:
        name: none_on_ubuntu_16_04
        path: report
  none_ubuntu_18_04:
    runs-on: ubuntu-18.04
    steps:
    - uses: actions/checkout@v2
    - name: build binaries
      run : |
        make minikube-linux-amd64
        make e2e-linux-amd64
    - name: install gopogh
      run: |
        cd /tmp
        GO111MODULE="on" go get github.com/medyagh/gopogh@v0.0.17 || true
        cd -
    - name: run integration test
      run: |
        mkdir -p /tmp/testhome
        MINIKUBE_HOME=/tmp/testhome sudo -E ./out/e2e-linux-amd64 -minikube-start-args=--vm-driver=none -expected-default-driver= -test.timeout=70m -test.v -binary=out/minikube-linux-amd64 2>&1 | tee ./report/testout.txt
    - name: generate gopogh report
      run: |
        export PATH=${PATH}:`go env GOPATH`/bin
        go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
        gopogh -in ./report/testout.json -out ./report/testout.html -name "docker ubuntu" -repo github.com/kubernetes/minikube/ || true
    - uses: actions/upload-artifact@v1
      with:
        name: none_on_ubuntu_latest_report
        path: report
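For reference, the "generate gopogh report" steps above pipe the test log through `go tool test2json` before handing it to gopogh. The sketch below only illustrates what that JSON stream looks like to a consumer; it is not gopogh's actual parser (that lives in github.com/medyagh/gopogh).

```go
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
	"time"
)

// event mirrors the JSON objects emitted by `go tool test2json -t`.
type event struct {
	Time    time.Time `json:"Time"`
	Action  string    `json:"Action"` // run, output, pass, fail, skip
	Package string    `json:"Package"`
	Test    string    `json:"Test"`
	Elapsed float64   `json:"Elapsed"`
	Output  string    `json:"Output"`
}

func main() {
	// Read ./report/testout.json line by line; each line is one JSON event.
	f, err := os.Open("./report/testout.json")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	counts := map[string]int{}
	sc := bufio.NewScanner(f)
	sc.Buffer(make([]byte, 1024*1024), 1024*1024) // tolerate very long output lines
	for sc.Scan() {
		var e event
		if err := json.Unmarshal(sc.Bytes(), &e); err != nil {
			continue // skip stray non-JSON lines carried over from the tee'd log
		}
		if e.Test != "" && (e.Action == "pass" || e.Action == "fail" || e.Action == "skip") {
			counts[e.Action]++
		}
	}
	fmt.Printf("pass=%d fail=%d skip=%d\n", counts["pass"], counts["fail"], counts["skip"])
}
```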
CHANGELOG.md
@@ -1,5 +1,103 @@
 # Release Notes
+
+## Version 1.7.0 - 2020-02-04
+
+* Add Azure Container Registry support [#6483](https://github.com/kubernetes/minikube/pull/6483)
+* Support --force for overriding the ssh check [#6237](https://github.com/kubernetes/minikube/pull/6237)
+* Update translation files with new strings [#6491](https://github.com/kubernetes/minikube/pull/6491)
+* fix docker-env for kic drivers [#6487](https://github.com/kubernetes/minikube/pull/6487)
+* Fix bugs that prevented previously-enabled addons from starting up [#6471](https://github.com/kubernetes/minikube/pull/6471)
+* Fix none driver bugs with "pause" [#6452](https://github.com/kubernetes/minikube/pull/6452)
+
+Thank you to those brave souls who made the final push toward this release:
+
+- Medya Gh
+- Priya Wadhwa
+- Sharif Elgamal
+- Thomas Strömberg
+
+## Version 1.7.0-beta.2 - 2020-01-31
+
+* Add docker run-time for kic driver [#6436](https://github.com/kubernetes/minikube/pull/6436)
+* Configure etcd and kube-proxy metrics to listen on minikube node IP [#6322](https://github.com/kubernetes/minikube/pull/6322)
+* add container runtime info to profile list [#6409](https://github.com/kubernetes/minikube/pull/6409)
+* status: Explicitly state that the cluster does not exist [#6438](https://github.com/kubernetes/minikube/pull/6438)
+* Do not use an arch suffix for the coredns name [#6243](https://github.com/kubernetes/minikube/pull/6243)
+* Prevent registry-creds configure from failing when a secret does not exist. [#6380](https://github.com/kubernetes/minikube/pull/6380)
+* improve checking modprob netfilter [#6427](https://github.com/kubernetes/minikube/pull/6427)
+
+Huge thank you for this release towards our contributors:
+
+- Anders Björklund
+- Bjørn Harald Fotland
+- Chance Zibolski
+- Kim Bao Long
+- Medya Ghazizadeh
+- Priya Wadhwa
+- Sharif Elgamal
+- Thomas Strömberg
+- akshay
+
+## Version 1.7.0-beta.1 - 2020-01-24
+
+* Add 'pause' command to freeze Kubernetes cluster [#5962](https://github.com/kubernetes/minikube/pull/5962)
+* kic driver: add multiple profiles and ssh [#6390](https://github.com/kubernetes/minikube/pull/6390)
+* Update DefaultKubernetesVersion to v1.17.2 [#6392](https://github.com/kubernetes/minikube/pull/6392)
+* Add varlink program for using with podman-remote [#6349](https://github.com/kubernetes/minikube/pull/6349)
+* Update Kubernetes libraries to v1.17.2 [#6374](https://github.com/kubernetes/minikube/pull/6374)
+* Remove addon manager [#6334](https://github.com/kubernetes/minikube/pull/6334)
+* Remove unnecessary crio restart to improve start latency [#6369](https://github.com/kubernetes/minikube/pull/6369)
+* Check for nil ref and img before passing them into go-containerregistry [#6236](https://github.com/kubernetes/minikube/pull/6236)
+* Change the compression methods used on the iso [#6341](https://github.com/kubernetes/minikube/pull/6341)
+* Update the crio.conf instead of overwriting it [#6219](https://github.com/kubernetes/minikube/pull/6219)
+* Update Japanese translation [#6339](https://github.com/kubernetes/minikube/pull/6339)
+* Stop minikube dashboard from crashing at start [#6325](https://github.com/kubernetes/minikube/pull/6325)
+
+Thanks you to the following contributors:
+
+- Anders F Björklund
+- inductor
+- Medya Ghazizadeh
+- Naoki Oketani
+- Priya Wadhwa
+- Sharif Elgamal
+- sshukun
+- Thomas Strömberg
+
+## Version 1.7.0-beta.0 - 2020-01-15
+
+* Use CGroupDriver function from cruntime for kubelet [#6287](https://github.com/kubernetes/minikube/pull/6287)
+* Experimental Docker support (kic) using the Kind image [#6151](https://github.com/kubernetes/minikube/pull/6151)
+* disable istio provisioner by default [#6315](https://github.com/kubernetes/minikube/pull/6315)
+* Add --dry-run option to start [#6256](https://github.com/kubernetes/minikube/pull/6256)
+* Improve "addon list" by viewing as a table [#6274](https://github.com/kubernetes/minikube/pull/6274)
+* Disable IPv6 in the minikube VM until it can be properly supported [#6241](https://github.com/kubernetes/minikube/pull/6241)
+* Fixes IPv6 address handling in kubeadm [#6214](https://github.com/kubernetes/minikube/pull/6214)
+* Upgrade crio to 1.16.1 [#6210](https://github.com/kubernetes/minikube/pull/6210)
+* Upgrade podman to 1.6.4 [#6208](https://github.com/kubernetes/minikube/pull/6208)
+* Enable or disable addons per profile [#6124](https://github.com/kubernetes/minikube/pull/6124)
+* Upgrade buildroot minor version [#6199](https://github.com/kubernetes/minikube/pull/6199)
+* Add systemd patch for booting on AMD Ryzen [#6183](https://github.com/kubernetes/minikube/pull/6183)
+* update zh translation [#6176](https://github.com/kubernetes/minikube/pull/6176)
+* Add istio addon for minikube [#6154](https://github.com/kubernetes/minikube/pull/6154)
+
+Huge thank you for this release towards our contributors:
+- Anders Björklund
+- andylibrian
+- Dao Cong Tien
+- Dominic Yin
+- fenglixa
+- GennadySpb
+- Kenta Iso
+- Kim Bao Long
+- Medya Ghazizadeh
+- Nguyen Hai Truong
+- Priya Wadhwa
+- Sharif Elgamal
+- Thomas Strömberg
+- ttonline6
+- Zhongcheng Lao
+- Zhou Hao
+
 ## Version 1.6.2 - 2019-12-19

@@ -11,7 +109,7 @@
 * start: Remove create/delete retry loop [#6129](https://github.com/kubernetes/minikube/pull/6129)
 * Change error text to encourage better issue reports [#6121](https://github.com/kubernetes/minikube/pull/6121)

 Huge thank you for this release towards our contributors:
 - Anukul Sangwan
 - Aresforchina
 - Curtis Carter
Makefile
@@ -15,12 +15,12 @@
 # Bump these on release - and please check ISO_VERSION for correctness.
 VERSION_MAJOR ?= 1
 VERSION_MINOR ?= 7
-VERSION_BUILD ?= 0-beta.0
+VERSION_BUILD ?= 0
 RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).${VERSION_BUILD}
 VERSION ?= v$(RAW_VERSION)

 # Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions
-ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).0-beta.0
+ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)
 # Dashes are valid in semver, but not Linux packaging. Use ~ to delimit alpha/beta
 DEB_VERSION ?= $(subst -,~,$(RAW_VERSION))
 RPM_VERSION ?= $(DEB_VERSION)
@@ -51,7 +51,7 @@ MINIKUBE_RELEASES_URL=https://github.com/kubernetes/minikube/releases/download
 KERNEL_VERSION ?= 4.19.88
 # latest from https://github.com/golangci/golangci-lint/releases
-GOLINT_VERSION ?= v1.21.0
+GOLINT_VERSION ?= v1.23.2
 # Limit number of default jobs, to avoid the CI builds running out of memory
 GOLINT_JOBS ?= 4
 # see https://github.com/golangci/golangci-lint#memory-usage-of-golangci-lint
@@ -73,6 +73,7 @@ GOARCH ?= $(shell go env GOARCH)
 GOPATH ?= $(shell go env GOPATH)
 BUILD_DIR ?= ./out
 $(shell mkdir -p $(BUILD_DIR))
+CURRENT_GIT_BRANCH ?= $(shell git branch | grep \* | cut -d ' ' -f2)

 # Use system python if it exists, otherwise use Docker.
 PYTHON := $(shell command -v python || echo "docker run --rm -it -v $(shell pwd):/minikube -w /minikube python python")
@@ -81,10 +82,16 @@ BUILD_OS := $(shell uname -s)
 SHA512SUM=$(shell command -v sha512sum || echo "shasum -a 512")

 STORAGE_PROVISIONER_TAG := v1.8.1
+# TODO: multi-arch manifest
+ifeq ($(GOARCH),amd64)
+STORAGE_PROVISIONER_IMAGE ?= $(REGISTRY)/storage-provisioner:$(STORAGE_PROVISIONER_TAG)
+else
+STORAGE_PROVISIONER_IMAGE ?= $(REGISTRY)/storage-provisioner-$(GOARCH):$(STORAGE_PROVISIONER_TAG)
+endif

 # Set the version information for the Kubernetes servers
 MINIKUBE_LDFLAGS := -X k8s.io/minikube/pkg/version.version=$(VERSION) -X k8s.io/minikube/pkg/version.isoVersion=$(ISO_VERSION) -X k8s.io/minikube/pkg/version.isoPath=$(ISO_BUCKET) -X k8s.io/minikube/pkg/version.gitCommitID=$(COMMIT)
-PROVISIONER_LDFLAGS := "$(MINIKUBE_LDFLAGS) -s -w -extldflags '-static'"
+PROVISIONER_LDFLAGS := "-X k8s.io/minikube/pkg/storage.version=$(STORAGE_PROVISIONER_TAG) -s -w -extldflags '-static'"

 MINIKUBEFILES := ./cmd/minikube/
 HYPERKIT_FILES := ./cmd/drivers/hyperkit
@@ -368,6 +375,9 @@ mdlint:
 out/docs/minikube.md: $(shell find "cmd") $(shell find "pkg/minikube/constants") pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go
 	go run -ldflags="$(MINIKUBE_LDFLAGS)" -tags gendocs hack/help_text/gen_help_text.go

+deb_version:
+	@echo $(DEB_VERSION)
+
 out/minikube_$(DEB_VERSION).deb: out/minikube_$(DEB_VERSION)-0_amd64.deb
 	cp $< $@

@@ -381,6 +391,9 @@ out/minikube_$(DEB_VERSION)-0_%.deb: out/minikube-linux-%
 	fakeroot dpkg-deb --build out/minikube_$(DEB_VERSION) $@
 	rm -rf out/minikube_$(DEB_VERSION)

+rpm_version:
+	@echo $(RPM_VERSION)
+
 out/minikube-$(RPM_VERSION).rpm: out/minikube-$(RPM_VERSION)-0.x86_64.rpm
 	cp $< $@

@@ -472,31 +485,30 @@ $(ISO_BUILD_IMAGE): deploy/iso/minikube-iso/Dockerfile
 	@echo ""
 	@echo "$(@) successfully built"

-out/storage-provisioner:
-	CGO_ENABLED=0 GOOS=linux go build -o $@ -ldflags=$(PROVISIONER_LDFLAGS) cmd/storage-provisioner/main.go
+out/storage-provisioner: out/storage-provisioner-$(GOARCH)
+	cp $< $@
+
+out/storage-provisioner-%: cmd/storage-provisioner/main.go pkg/storage/storage_provisioner.go
+ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
+	$(call DOCKER,$(BUILD_IMAGE),/usr/bin/make $@)
+else
+	CGO_ENABLED=0 GOOS=linux GOARCH=$* go build -o $@ -ldflags=$(PROVISIONER_LDFLAGS) cmd/storage-provisioner/main.go
+endif

 .PHONY: storage-provisioner-image
-storage-provisioner-image: out/storage-provisioner ## Build storage-provisioner docker image
-ifeq ($(GOARCH),amd64)
-	docker build -t $(REGISTRY)/storage-provisioner:$(STORAGE_PROVISIONER_TAG) -f deploy/storage-provisioner/Dockerfile .
-else
-	docker build -t $(REGISTRY)/storage-provisioner-$(GOARCH):$(STORAGE_PROVISIONER_TAG) -f deploy/storage-provisioner/Dockerfile-$(GOARCH) .
-endif
+storage-provisioner-image: out/storage-provisioner-$(GOARCH) ## Build storage-provisioner docker image
+	docker build -t $(STORAGE_PROVISIONER_IMAGE) -f deploy/storage-provisioner/Dockerfile --build-arg arch=$(GOARCH) .

 .PHONY: kic-base-image
 kic-base-image: ## builds the base image used for kic.
-	docker rmi -f $(REGISTRY)/kicbase:v0.0.1-snapshot || true
-	docker build -f ./hack/images/kicbase.Dockerfile -t $(REGISTRY)/kicbase:v0.0.1-snapshot --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) .
+	docker rmi -f $(REGISTRY)/kicbase:v0.0.5-snapshot || true
+	docker build -f ./hack/images/kicbase.Dockerfile -t $(REGISTRY)/kicbase:v0.0.5-snapshot --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) .

 .PHONY: push-storage-provisioner-image
 push-storage-provisioner-image: storage-provisioner-image ## Push storage-provisioner docker image using gcloud
-ifeq ($(GOARCH),amd64)
-	gcloud docker -- push $(REGISTRY)/storage-provisioner:$(STORAGE_PROVISIONER_TAG)
-else
-	gcloud docker -- push $(REGISTRY)/storage-provisioner-$(GOARCH):$(STORAGE_PROVISIONER_TAG)
-endif
+	gcloud docker -- push $(STORAGE_PROVISIONER_IMAGE)

 .PHONY: out/gvisor-addon
 out/gvisor-addon: pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go ## Build gvisor addon
@@ -520,13 +532,23 @@ release-minikube: out/minikube checksum ## Minikube release
 	gsutil cp out/minikube-$(GOOS)-$(GOARCH) $(MINIKUBE_UPLOAD_LOCATION)/$(MINIKUBE_VERSION)/minikube-$(GOOS)-$(GOARCH)
 	gsutil cp out/minikube-$(GOOS)-$(GOARCH).sha256 $(MINIKUBE_UPLOAD_LOCATION)/$(MINIKUBE_VERSION)/minikube-$(GOOS)-$(GOARCH).sha256

-out/docker-machine-driver-kvm2:
+out/docker-machine-driver-kvm2: out/docker-machine-driver-kvm2-amd64
+	cp $< $@
+
+out/docker-machine-driver-kvm2-x86_64: out/docker-machine-driver-kvm2-amd64
+	cp $< $@
+
+out/docker-machine-driver-kvm2-aarch64: out/docker-machine-driver-kvm2-arm64
+	cp $< $@
+
+out/docker-machine-driver-kvm2-%:
 ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
 	docker inspect -f '{{.Id}} {{.RepoTags}}' $(KVM_BUILD_IMAGE) || $(MAKE) kvm-image
 	$(call DOCKER,$(KVM_BUILD_IMAGE),/usr/bin/make $@ COMMIT=$(COMMIT))
 	# make extra sure that we are linking with the older version of libvirt (1.3.1)
 	test "`strings $@ | grep '^LIBVIRT_[0-9]' | sort | tail -n 1`" = "LIBVIRT_1.2.9"
 else
+	GOARCH=$* \
 	go build \
 	-installsuffix "static" \
 	-ldflags="$(KVM2_LDFLAGS)" \
@@ -536,21 +558,29 @@ else
 endif
 	chmod +X $@

-out/docker-machine-driver-kvm2_$(DEB_VERSION).deb: out/docker-machine-driver-kvm2
+out/docker-machine-driver-kvm2_$(DEB_VERSION).deb: out/docker-machine-driver-kvm2_$(DEB_VERSION)-0_amd64.deb
+	cp $< $@
+
+out/docker-machine-driver-kvm2_$(DEB_VERSION)-0_%.deb: out/docker-machine-driver-kvm2-%
 	cp -r installers/linux/deb/kvm2_deb_template out/docker-machine-driver-kvm2_$(DEB_VERSION)
 	chmod 0755 out/docker-machine-driver-kvm2_$(DEB_VERSION)/DEBIAN
 	sed -E -i 's/--VERSION--/'$(DEB_VERSION)'/g' out/docker-machine-driver-kvm2_$(DEB_VERSION)/DEBIAN/control
+	sed -E -i 's/--ARCH--/'$*'/g' out/docker-machine-driver-kvm2_$(DEB_VERSION)/DEBIAN/control
 	mkdir -p out/docker-machine-driver-kvm2_$(DEB_VERSION)/usr/bin
-	cp out/docker-machine-driver-kvm2 out/docker-machine-driver-kvm2_$(DEB_VERSION)/usr/bin/docker-machine-driver-kvm2
+	cp $< out/docker-machine-driver-kvm2_$(DEB_VERSION)/usr/bin/docker-machine-driver-kvm2
-	fakeroot dpkg-deb --build out/docker-machine-driver-kvm2_$(DEB_VERSION)
+	fakeroot dpkg-deb --build out/docker-machine-driver-kvm2_$(DEB_VERSION) $@
 	rm -rf out/docker-machine-driver-kvm2_$(DEB_VERSION)

-out/docker-machine-driver-kvm2-$(RPM_VERSION).rpm: out/docker-machine-driver-kvm2
+out/docker-machine-driver-kvm2-$(RPM_VERSION).rpm: out/docker-machine-driver-kvm2-$(RPM_VERSION)-0.x86_64.deb
+	cp $< $@
+
+out/docker-machine-driver-kvm2-$(RPM_VERSION)-0.%.rpm: out/docker-machine-driver-kvm2-%
 	cp -r installers/linux/rpm/kvm2_rpm_template out/docker-machine-driver-kvm2-$(RPM_VERSION)
 	sed -E -i 's/--VERSION--/'$(RPM_VERSION)'/g' out/docker-machine-driver-kvm2-$(RPM_VERSION)/docker-machine-driver-kvm2.spec
 	sed -E -i 's|--OUT--|'$(PWD)/out'|g' out/docker-machine-driver-kvm2-$(RPM_VERSION)/docker-machine-driver-kvm2.spec
-	rpmbuild -bb -D "_rpmdir $(PWD)/out" -D "_rpmfilename docker-machine-driver-kvm2-$(RPM_VERSION).rpm" \
+	rpmbuild -bb -D "_rpmdir $(PWD)/out" --target $* \
 		out/docker-machine-driver-kvm2-$(RPM_VERSION)/docker-machine-driver-kvm2.spec
+	@mv out/$*/docker-machine-driver-kvm2-$(RPM_VERSION)-0.$*.rpm out/ && rmdir out/$*
 	rm -rf out/docker-machine-driver-kvm2-$(RPM_VERSION)

 .PHONY: kvm-image
@@ -598,6 +628,15 @@ out/mkcmp:
 out/performance-monitor:
 	GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o $@ cmd/performance/monitor/monitor.go

+.PHONY: compare
+compare: out/mkcmp out/minikube
+	mv out/minikube out/$(CURRENT_GIT_BRANCH).minikube
+	git checkout master
+	make out/minikube
+	mv out/minikube out/master.minikube
+	git checkout $(CURRENT_GIT_BRANCH)
+	out/mkcmp out/master.minikube out/$(CURRENT_GIT_BRANCH).minikube
+
 .PHONY: help
 help:
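The new `PROVISIONER_LDFLAGS` stamps the provisioner tag into `k8s.io/minikube/pkg/storage.version` at link time with `-X`. The snippet below is a stand-alone illustration of that link-time injection pattern; the package and variable names here are chosen for the example and are not minikube's actual `pkg/storage` source.

```go
// Build with: go build -ldflags="-X main.version=v1.8.1" .
package main

import "fmt"

// version is overridden at link time by -X; the default value only shows
// up in unstamped developer builds.
var version = "unset"

func main() {
	fmt.Println("storage-provisioner version:", version)
}
```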
@@ -102,9 +102,9 @@ var printAddonsList = func() {
 	for _, addonName := range addonNames {
 		addonBundle := assets.Addons[addonName]
-		addonStatus, err := addonBundle.IsEnabled()
+		addonStatus, err := addonBundle.IsEnabled(pName)
 		if err != nil {
-			exit.WithError("Error getting addons status", err)
+			out.WarningT("Unable to get addon status for {{.name}}: {{.error}}", out.V{"name": addonName, "error": err})
 		}
 		tData = append(tData, []string{addonName, pName, fmt.Sprintf("%s %s", stringFromStatus(addonStatus), iconFromStatus(addonStatus))})
 	}
@@ -114,12 +114,11 @@ var printAddonsList = func() {
 	v, _, err := config.ListProfiles()
 	if err != nil {
-		glog.Infof("error getting list of porfiles: %v", err)
+		glog.Errorf("list profiles returned error: %v", err)
 	}
 	if len(v) > 1 {
 		out.T(out.Tip, "To see addons list for other profiles use: `minikube addons -p name list`")
 	}
 }

 var printAddonsJSON = func() {
@@ -135,9 +134,10 @@ var printAddonsJSON = func() {
 	for _, addonName := range addonNames {
 		addonBundle := assets.Addons[addonName]

-		addonStatus, err := addonBundle.IsEnabled()
+		addonStatus, err := addonBundle.IsEnabled(pName)
 		if err != nil {
-			exit.WithError("Error getting addons status", err)
+			glog.Errorf("Unable to get addon status for %s: %v", addonName, err)
+			continue
 		}

 		addonsMap[addonName] = map[string]interface{}{
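The `IsEnabled()` → `IsEnabled(pName)` change above makes addon status a per-profile lookup rather than a global one. Minikube's real implementation lives in pkg/minikube/assets and is not reproduced here; the sketch below only illustrates the shape of such a lookup, with a hypothetical `profileAddons` loader standing in for the profile config.

```go
package addons

// Addon is a trimmed-down stand-in for minikube's assets.Addon.
type Addon struct {
	name           string
	defaultEnabled bool
}

// profileAddons is a hypothetical helper: it would load the named profile's
// config and return its addon-name -> enabled map.
var profileAddons = func(profile string) (map[string]bool, error) {
	return map[string]bool{}, nil
}

// IsEnabled reports whether the addon is enabled for a specific profile,
// falling back to the addon's default when the profile has no record of it.
func (a *Addon) IsEnabled(profile string) (bool, error) {
	m, err := profileAddons(profile)
	if err != nil {
		return a.defaultEnabled, err
	}
	if enabled, ok := m[a.name]; ok {
		return enabled, nil
	}
	return a.defaultEnabled, nil
}
```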
@@ -53,6 +53,9 @@ var addonsConfigureCmd = &cobra.Command{
 		dockerUser := "changeme"
 		dockerPass := "changeme"
 		gcrURL := "https://gcr.io"
+		acrURL := "changeme"
+		acrClientID := "changeme"
+		acrPassword := "changeme"

 		enableAWSECR := AskForYesNoConfirmation("\nDo you want to enable AWS Elastic Container Registry?", posResponses, negResponses)
 		if enableAWSECR {
@@ -90,6 +93,13 @@ var addonsConfigureCmd = &cobra.Command{
 			dockerPass = AskForPasswordValue("-- Enter docker registry password: ")
 		}

+		enableACR := AskForYesNoConfirmation("\nDo you want to enable Azure Container Registry?", posResponses, negResponses)
+		if enableACR {
+			acrURL = AskForStaticValue("-- Enter Azure Container Registry (ACR) URL: ")
+			acrClientID = AskForStaticValue("-- Enter client ID (service principal ID) to access ACR: ")
+			acrPassword = AskForPasswordValue("-- Enter service principal password to access Azure Container Registry: ")
+		}
+
 		// Create ECR Secret
 		err := service.CreateSecret(
 			"kube-system",
@@ -148,6 +158,26 @@ var addonsConfigureCmd = &cobra.Command{
 		if err != nil {
 			out.WarningT("ERROR creating `registry-creds-dpr` secret")
 		}

+		// Create Azure Container Registry Secret
+		err = service.CreateSecret(
+			"kube-system",
+			"registry-creds-acr",
+			map[string]string{
+				"ACR_URL":       acrURL,
+				"ACR_CLIENT_ID": acrClientID,
+				"ACR_PASSWORD":  acrPassword,
+			},
+			map[string]string{
+				"app":                           "registry-creds",
+				"cloud":                         "acr",
+				"kubernetes.io/minikube-addons": "registry-creds",
+			})
+
+		if err != nil {
+			out.WarningT("ERROR creating `registry-creds-acr` secret")
+		}
+
 	default:
 		out.FailureT("{{.name}} has no available configuration options", out.V{"name": addon})
 		return
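For context, the `service.CreateSecret` call above ultimately creates an ordinary Kubernetes secret with those data keys and labels in `kube-system`. A rough client-go equivalent is sketched below (it assumes a recent client-go and a reachable kubeconfig; this is not minikube's `service` package, and the ACR values are placeholders).

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load the usual kubeconfig; adjust the path for your environment.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "registry-creds-acr",
			Namespace: "kube-system",
			Labels: map[string]string{
				"app":                           "registry-creds",
				"cloud":                         "acr",
				"kubernetes.io/minikube-addons": "registry-creds",
			},
		},
		// StringData lets us pass plain strings; the API server base64-encodes them.
		StringData: map[string]string{
			"ACR_URL":       "https://example.azurecr.io", // placeholder
			"ACR_CLIENT_ID": "changeme",
			"ACR_PASSWORD":  "changeme",
		},
	}

	created, err := client.CoreV1().Secrets("kube-system").Create(context.Background(), secret, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("created secret", created.Name)
}
```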
@@ -39,7 +39,7 @@ var addonsDisableCmd = &cobra.Command{
 		if err != nil {
 			exit.WithError("disable failed", err)
 		}
-		out.SuccessT(`"{{.minikube_addon}}" was successfully disabled`, out.V{"minikube_addon": addon})
+		out.T(out.AddonDisable, `"The '{{.minikube_addon}}' addon is disabled`, out.V{"minikube_addon": addon})
 	},
 }

@@ -33,13 +33,12 @@ var addonsEnableCmd = &cobra.Command{
 		if len(args) != 1 {
 			exit.UsageT("usage: minikube addons enable ADDON_NAME")
 		}

 		addon := args[0]
 		err := addons.Set(addon, "true", viper.GetString(config.MachineProfile))
 		if err != nil {
 			exit.WithError("enable failed", err)
 		}
-		out.SuccessT("{{.addonName}} was successfully enabled", out.V{"addonName": addon})
+		out.T(out.AddonEnable, "The '{{.addonName}}' addon is enabled", out.V{"addonName": addon})
 	},
 }
@@ -24,8 +24,10 @@ import (
 	"github.com/pkg/browser"
 	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
 	"k8s.io/minikube/pkg/minikube/assets"
 	"k8s.io/minikube/pkg/minikube/cluster"
+	pkg_config "k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/exit"
 	"k8s.io/minikube/pkg/minikube/machine"
 	"k8s.io/minikube/pkg/minikube/out"
@@ -66,7 +68,8 @@ var addonsOpenCmd = &cobra.Command{
 		}
 		defer api.Close()

-		if !cluster.IsMinikubeRunning(api) {
+		profileName := viper.GetString(pkg_config.MachineProfile)
+		if !cluster.IsHostRunning(api, profileName) {
 			os.Exit(1)
 		}
 		addon, ok := assets.Addons[addonName] // validate addon input
@@ -75,7 +78,7 @@ var addonsOpenCmd = &cobra.Command{
 To see the list of available addons run:
 minikube addons list`, out.V{"name": addonName})
 		}
-		ok, err = addon.IsEnabled()
+		ok, err = addon.IsEnabled(profileName)
 		if err != nil {
 			exit.WithError("IsEnabled failed", err)
 		}

@@ -22,6 +22,7 @@ import (
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 	pkgConfig "k8s.io/minikube/pkg/minikube/config"
+	pkg_config "k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/constants"
 	"k8s.io/minikube/pkg/minikube/exit"
 	"k8s.io/minikube/pkg/minikube/kubeconfig"
@@ -78,7 +79,7 @@ var ProfileCmd = &cobra.Command{
 		}
 		cc, err := pkgConfig.Load(profile)
 		// might err when loading older version of cfg file that doesn't have KeepContext field
-		if err != nil && !os.IsNotExist(err) {
+		if err != nil && !pkg_config.IsNotExist(err) {
 			out.ErrT(out.Sad, `Error loading profile config: {{.error}}`, out.V{"error": err})
 		}
 		if err == nil {
@@ -60,7 +60,7 @@ var printProfilesTable = func() {
 	var validData [][]string
 	table := tablewriter.NewWriter(os.Stdout)
-	table.SetHeader([]string{"Profile", "VM Driver", "NodeIP", "Node Port", "Kubernetes Version", "Status"})
+	table.SetHeader([]string{"Profile", "VM Driver", "Runtime", "IP", "Port", "Version", "Status"})
 	table.SetAutoFormatHeaders(false)
 	table.SetBorders(tablewriter.Border{Left: true, Top: true, Right: true, Bottom: true})
 	table.SetCenterSeparator("|")
@@ -71,16 +71,21 @@ var printProfilesTable = func() {
 	}
 	api, err := machine.NewAPIClient()
 	if err != nil {
-		glog.Infof("failed to get machine api client %v", err)
+		glog.Errorf("failed to get machine api client %v", err)
 	}
 	defer api.Close()

 	for _, p := range validProfiles {
 		p.Status, err = cluster.GetHostStatus(api, p.Name)
 		if err != nil {
-			glog.Infof("error getting host status for %v", err)
+			glog.Warningf("error getting host status for %s: %v", p.Name, err)
 		}
-		validData = append(validData, []string{p.Name, p.Config[0].VMDriver, p.Config[0].KubernetesConfig.NodeIP, strconv.Itoa(p.Config[0].KubernetesConfig.NodePort), p.Config[0].KubernetesConfig.KubernetesVersion, p.Status})
+		cp, err := config.PrimaryControlPlane(*p.Config)
+		if err != nil {
+			glog.Errorf("%q has no control plane: %v", p.Name, err)
+			// Print the data we know about anyways
+		}
+		validData = append(validData, []string{p.Name, p.Config.VMDriver, p.Config.KubernetesConfig.ContainerRuntime, cp.IP, strconv.Itoa(cp.Port), p.Config.KubernetesConfig.KubernetesVersion, p.Status})
 	}

 	table.AppendBulk(validData)
@@ -107,7 +112,7 @@ var printProfilesTable = func() {
 var printProfilesJSON = func() {
 	api, err := machine.NewAPIClient()
 	if err != nil {
-		glog.Infof("failed to get machine api client %v", err)
+		glog.Errorf("failed to get machine api client %v", err)
 	}
 	defer api.Close()

@@ -115,7 +120,7 @@ var printProfilesJSON = func() {
 	for _, v := range validProfiles {
 		status, err := cluster.GetHostStatus(api, v.Name)
 		if err != nil {
-			glog.Infof("error getting host status for %v", err)
+			glog.Warningf("error getting host status for %s: %v", v.Name, err)
 		}
 		v.Status = status
 	}
@@ -137,7 +142,7 @@ var printProfilesJSON = func() {
 	var body = map[string]interface{}{}

-	if err == nil || os.IsNotExist(err) {
+	if err == nil || config.IsNotExist(err) {
 		body["valid"] = valid
 		body["invalid"] = invalid
 		jsonString, _ := json.Marshal(body)
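The `profile list` change above now resolves the machine to display from `config.PrimaryControlPlane` instead of reading a single node out of `KubernetesConfig`. The real helper lives in pkg/minikube/config and is not shown here; the sketch below only illustrates the kind of selection it performs, over a hypothetical trimmed-down `Node`/`MachineConfig` pair.

```go
package config

import "errors"

// Node is a trimmed-down stand-in for minikube's config.Node.
type Node struct {
	Name         string
	IP           string
	Port         int
	ControlPlane bool
}

// MachineConfig is a trimmed-down stand-in for a per-profile cluster config.
type MachineConfig struct {
	Name  string
	Nodes []Node
}

// PrimaryControlPlane returns the first node marked as a control plane,
// which is where the profile table takes its IP and port columns from.
func PrimaryControlPlane(cc MachineConfig) (Node, error) {
	for _, n := range cc.Nodes {
		if n.ControlPlane {
			return n, nil
		}
	}
	return Node{}, errors.New("no control-plane node found in config")
}
```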
@@ -16,9 +16,18 @@ limitations under the License.
 package config

-import "testing"
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"k8s.io/minikube/pkg/minikube/config"
+	"k8s.io/minikube/pkg/minikube/localpath"
+)

 func TestNotFound(t *testing.T) {
+	createTestProfile(t)
 	err := Set("nonexistent", "10")
 	if err == nil {
 		t.Fatalf("Set did not return error for unknown property")
@@ -26,6 +35,7 @@ func TestNotFound(t *testing.T) {
 }

 func TestSetNotAllowed(t *testing.T) {
+	createTestProfile(t)
 	err := Set("vm-driver", "123456")
 	if err == nil || err.Error() != "[driver \"123456\" is not supported]" {
 		t.Fatalf("Set did not return error for unallowed value")
@@ -33,7 +43,14 @@ func TestSetNotAllowed(t *testing.T) {
 }

 func TestSetOK(t *testing.T) {
+	createTestProfile(t)
 	err := Set("vm-driver", "virtualbox")
+	defer func() {
+		err = Unset("vm-driver")
+		if err != nil {
+			t.Errorf("failed to unset vm-driver")
+		}
+	}()
 	if err != nil {
 		t.Fatalf("Set returned error for valid property value")
 	}
@@ -45,3 +62,25 @@ func TestSetOK(t *testing.T) {
 		t.Fatalf("Get returned %s, expected \"virtualbox\"", val)
 	}
 }

+func createTestProfile(t *testing.T) {
+	t.Helper()
+	td, err := ioutil.TempDir("", "profile")
+	if err != nil {
+		t.Fatalf("tempdir: %v", err)
+	}
+
+	err = os.Setenv(localpath.MinikubeHome, td)
+	if err != nil {
+		t.Errorf("error setting up test environment. could not set %s", localpath.MinikubeHome)
+	}
+
+	// Not necessary, but it is a handy random alphanumeric
+	name := filepath.Base(td)
+	if err := os.MkdirAll(config.ProfileFolderPath(name), 0777); err != nil {
+		t.Fatalf("error creating temporary directory")
+	}
+	if err := config.DefaultLoader.WriteConfigToFile(name, &config.MachineConfig{}); err != nil {
+		t.Fatalf("error creating temporary profile config: %v", err)
+	}
+}
@@ -36,7 +36,6 @@ import (
 	pkgaddons "k8s.io/minikube/pkg/addons"
 	"k8s.io/minikube/pkg/minikube/assets"
 	"k8s.io/minikube/pkg/minikube/cluster"
-	"k8s.io/minikube/pkg/minikube/config"
 	pkg_config "k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/exit"
 	"k8s.io/minikube/pkg/minikube/machine"
@@ -59,11 +58,17 @@ var dashboardCmd = &cobra.Command{
 	Short: "Access the kubernetes dashboard running within the minikube cluster",
 	Long:  `Access the kubernetes dashboard running within the minikube cluster`,
 	Run: func(cmd *cobra.Command, args []string) {
-		cc, err := pkg_config.Load(viper.GetString(config.MachineProfile))
-		if err != nil && !os.IsNotExist(err) {
+		profileName := viper.GetString(pkg_config.MachineProfile)
+		cc, err := pkg_config.Load(profileName)
+		if err != nil && !pkg_config.IsNotExist(err) {
 			exit.WithError("Error loading profile config", err)
 		}

+		if err != nil {
+			out.ErrT(out.Meh, `"{{.name}}" profile does not exist`, out.V{"name": profileName})
+			os.Exit(1)
+		}
+
 		api, err := machine.NewAPIClient()
 		defer func() {
 			err := api.Close()
@@ -85,9 +90,11 @@ var dashboardCmd = &cobra.Command{
 			}
 		}

-		err = proxy.ExcludeIP(cc.KubernetesConfig.NodeIP) // to be used for http get calls
-		if err != nil {
-			glog.Errorf("Error excluding IP from proxy: %s", err)
+		for _, n := range cc.Nodes {
+			err = proxy.ExcludeIP(n.IP) // to be used for http get calls
+			if err != nil {
+				glog.Errorf("Error excluding IP from proxy: %s", err)
+			}
 		}

 		kubectl, err := exec.LookPath("kubectl")
@@ -95,18 +102,18 @@ var dashboardCmd = &cobra.Command{
 			exit.WithCodeT(exit.NoInput, "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/")
 		}

-		if !cluster.IsMinikubeRunning(api) {
+		if !cluster.IsHostRunning(api, profileName) {
 			os.Exit(1)
 		}

 		// Check dashboard status before enabling it
 		dashboardAddon := assets.Addons["dashboard"]
-		dashboardStatus, _ := dashboardAddon.IsEnabled()
+		dashboardStatus, _ := dashboardAddon.IsEnabled(profileName)
 		if !dashboardStatus {
 			// Send status messages to stderr for folks re-using this output.
 			out.ErrT(out.Enabling, "Enabling dashboard ...")
 			// Enable the dashboard add-on
-			err = pkgaddons.Set("dashboard", "true", viper.GetString(config.MachineProfile))
+			err = pkgaddons.Set("dashboard", "true", profileName)
 			if err != nil {
 				exit.WithError("Unable to enable dashboard", err)
 			}
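The dashboard change above loops over every node IP rather than a single `NodeIP` when excluding addresses from the HTTP proxy. minikube's `proxy.ExcludeIP` is not shown in this diff; the sketch below is only a generic illustration of what "exclude an IP from proxying" means in terms of the NO_PROXY environment variable that net/http consults.

```go
package main

import (
	"fmt"
	"net"
	"os"
	"strings"
)

// excludeIP appends ip to NO_PROXY (if it is valid and not already present),
// so the default HTTP transport bypasses the proxy for that address.
func excludeIP(ip string) error {
	if net.ParseIP(ip) == nil {
		return fmt.Errorf("invalid IP: %q", ip)
	}
	existing := os.Getenv("NO_PROXY")
	for _, v := range strings.Split(existing, ",") {
		if v == ip {
			return nil // already excluded
		}
	}
	if existing == "" {
		return os.Setenv("NO_PROXY", ip)
	}
	return os.Setenv("NO_PROXY", existing+","+ip)
}

func main() {
	// With multi-node clusters, every node IP gets excluded, mirroring the loop above.
	for _, ip := range []string{"192.168.39.10", "192.168.39.11"} {
		if err := excludeIP(ip); err != nil {
			fmt.Fprintln(os.Stderr, "exclude failed:", err)
		}
	}
	fmt.Println("NO_PROXY =", os.Getenv("NO_PROXY"))
}
```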
@ -35,6 +35,7 @@ import (
|
||||||
"k8s.io/minikube/pkg/minikube/cluster"
|
"k8s.io/minikube/pkg/minikube/cluster"
|
||||||
pkg_config "k8s.io/minikube/pkg/minikube/config"
|
pkg_config "k8s.io/minikube/pkg/minikube/config"
|
||||||
"k8s.io/minikube/pkg/minikube/constants"
|
"k8s.io/minikube/pkg/minikube/constants"
|
||||||
|
"k8s.io/minikube/pkg/minikube/cruntime"
|
||||||
"k8s.io/minikube/pkg/minikube/driver"
|
"k8s.io/minikube/pkg/minikube/driver"
|
||||||
"k8s.io/minikube/pkg/minikube/exit"
|
"k8s.io/minikube/pkg/minikube/exit"
|
||||||
"k8s.io/minikube/pkg/minikube/kubeconfig"
|
"k8s.io/minikube/pkg/minikube/kubeconfig"
|
||||||
|
@ -131,14 +132,12 @@ func runDelete(cmd *cobra.Command, args []string) {
|
||||||
profileName := viper.GetString(pkg_config.MachineProfile)
|
profileName := viper.GetString(pkg_config.MachineProfile)
|
||||||
profile, err := pkg_config.LoadProfile(profileName)
|
profile, err := pkg_config.LoadProfile(profileName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
out.ErrT(out.Meh, `"{{.name}}" profile does not exist`, out.V{"name": profileName})
|
out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": profileName})
|
||||||
}
|
}
|
||||||
|
|
||||||
errs := DeleteProfiles([]*pkg_config.Profile{profile})
|
errs := DeleteProfiles([]*pkg_config.Profile{profile})
|
||||||
if len(errs) > 0 {
|
if len(errs) > 0 {
|
||||||
HandleDeletionErrors(errs)
|
HandleDeletionErrors(errs)
|
||||||
} else {
|
|
||||||
out.T(out.DeletingHost, "Successfully deleted profile \"{{.name}}\"", out.V{"name": profileName})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -153,7 +152,7 @@ func purgeMinikubeDirectory() {
|
||||||
if err := os.RemoveAll(localpath.MiniPath()); err != nil {
|
if err := os.RemoveAll(localpath.MiniPath()); err != nil {
|
||||||
exit.WithError("unable to delete minikube config folder", err)
|
exit.WithError("unable to delete minikube config folder", err)
|
||||||
}
|
}
|
||||||
out.T(out.Crushed, "Successfully purged minikube directory located at - [{{.minikubeDirectory}}]", out.V{"minikubeDirectory": localpath.MiniPath()})
|
out.T(out.Deleted, "Successfully purged minikube directory located at - [{{.minikubeDirectory}}]", out.V{"minikubeDirectory": localpath.MiniPath()})
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteProfiles deletes one or more profiles
|
// DeleteProfiles deletes one or more profiles
|
||||||
|
@ -188,13 +187,13 @@ func deleteProfile(profile *pkg_config.Profile) error {
|
||||||
}
|
}
|
||||||
defer api.Close()
|
defer api.Close()
|
||||||
cc, err := pkg_config.Load(profile.Name)
|
cc, err := pkg_config.Load(profile.Name)
|
||||||
if err != nil && !os.IsNotExist(err) {
|
if err != nil && !pkg_config.IsNotExist(err) {
|
||||||
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("error loading profile config: %v", err))
|
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("error loading profile config: %v", err))
|
||||||
return DeletionError{Err: delErr, Errtype: MissingProfile}
|
return DeletionError{Err: delErr, Errtype: MissingProfile}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err == nil && driver.BareMetal(cc.VMDriver) {
|
if err == nil && driver.BareMetal(cc.VMDriver) {
|
||||||
if err := uninstallKubernetes(api, cc.KubernetesConfig, viper.GetString(cmdcfg.Bootstrapper)); err != nil {
|
if err := uninstallKubernetes(api, profile.Name, cc.KubernetesConfig, viper.GetString(cmdcfg.Bootstrapper)); err != nil {
|
||||||
deletionError, ok := err.(DeletionError)
|
deletionError, ok := err.(DeletionError)
|
||||||
if ok {
|
if ok {
|
||||||
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("%v", err))
|
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("%v", err))
|
||||||
|
@ -212,7 +211,7 @@ func deleteProfile(profile *pkg_config.Profile) error {
|
||||||
if err = cluster.DeleteHost(api, profile.Name); err != nil {
|
if err = cluster.DeleteHost(api, profile.Name); err != nil {
|
||||||
switch errors.Cause(err).(type) {
|
switch errors.Cause(err).(type) {
|
||||||
case mcnerror.ErrHostDoesNotExist:
|
case mcnerror.ErrHostDoesNotExist:
|
||||||
out.T(out.Meh, `"{{.name}}" cluster does not exist. Proceeding ahead with cleanup.`, out.V{"name": profile.Name})
|
glog.Infof("%s cluster does not exist. Proceeding ahead with cleanup.", profile.Name)
|
||||||
default:
|
default:
|
||||||
out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err})
|
out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err})
|
||||||
out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": profile.Name})
|
out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": profile.Name})
|
||||||
|
@ -223,7 +222,7 @@ func deleteProfile(profile *pkg_config.Profile) error {
|
||||||
deleteProfileDirectory(profile.Name)
|
deleteProfileDirectory(profile.Name)
|
||||||
|
|
||||||
if err := pkg_config.DeleteProfile(profile.Name); err != nil {
|
if err := pkg_config.DeleteProfile(profile.Name); err != nil {
|
||||||
if os.IsNotExist(err) {
|
if pkg_config.IsNotExist(err) {
|
||||||
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("\"%s\" profile does not exist", profile.Name))
|
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("\"%s\" profile does not exist", profile.Name))
|
||||||
return DeletionError{Err: delErr, Errtype: MissingProfile}
|
return DeletionError{Err: delErr, Errtype: MissingProfile}
|
||||||
}
|
}
|
||||||
|
@ -231,11 +230,10 @@ func deleteProfile(profile *pkg_config.Profile) error {
|
||||||
return DeletionError{Err: delErr, Errtype: Fatal}
|
return DeletionError{Err: delErr, Errtype: Fatal}
|
||||||
}
|
}
|
||||||
|
|
||||||
out.T(out.Crushed, `The "{{.name}}" cluster has been deleted.`, out.V{"name": profile.Name})
|
|
||||||
|
|
||||||
if err := deleteContext(profile.Name); err != nil {
|
if err := deleteContext(profile.Name); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
out.T(out.Deleted, `Removed all traces of the "{{.name}}" cluster.`, out.V{"name": profile.Name})
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -276,12 +274,34 @@ func profileDeletionErr(profileName string, additionalInfo string) error {
 return fmt.Errorf("error deleting profile \"%s\": %s", profileName, additionalInfo)
 }
 
-func uninstallKubernetes(api libmachine.API, kc pkg_config.KubernetesConfig, bsName string) error {
+func uninstallKubernetes(api libmachine.API, profile string, kc pkg_config.KubernetesConfig, bsName string) error {
 out.T(out.Resetting, "Uninstalling Kubernetes {{.kubernetes_version}} using {{.bootstrapper_name}} ...", out.V{"kubernetes_version": kc.KubernetesVersion, "bootstrapper_name": bsName})
 clusterBootstrapper, err := getClusterBootstrapper(api, bsName)
 if err != nil {
 return DeletionError{Err: fmt.Errorf("unable to get bootstrapper: %v", err), Errtype: Fatal}
-} else if err = clusterBootstrapper.DeleteCluster(kc); err != nil {
+}
+
+host, err := cluster.CheckIfHostExistsAndLoad(api, profile)
+if err != nil {
+exit.WithError("Error getting host", err)
+}
+r, err := machine.CommandRunner(host)
+if err != nil {
+exit.WithError("Failed to get command runner", err)
+}
+
+cr, err := cruntime.New(cruntime.Config{Type: kc.ContainerRuntime, Runner: r})
+if err != nil {
+exit.WithError("Failed runtime", err)
+}
+
+// Unpause the cluster if necessary to avoid hung kubeadm
+_, err = cluster.Unpause(cr, r, nil)
+if err != nil {
+glog.Errorf("unpause failed: %v", err)
+}
+
+if err = clusterBootstrapper.DeleteCluster(kc); err != nil {
 return DeletionError{Err: fmt.Errorf("failed to delete cluster: %v", err), Errtype: Fatal}
 }
 return nil
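Note: a brief sketch, not part of the patch, of the cluster.Unpause contract this delete path appears to rely on; the nil-means-all-namespaces behaviour is an assumption based on the hunk above.

// Sketch under that assumption: unpause everything before kubeadm tears the
// cluster down, so a paused kubelet or container cannot hang DeleteCluster.
if _, err := cluster.Unpause(cr, r, nil); err != nil {
	glog.Errorf("unpause failed: %v", err) // non-fatal by design; deletion proceeds
}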
@@ -36,14 +36,15 @@ import (
 "github.com/spf13/viper"
 "k8s.io/minikube/pkg/minikube/cluster"
 "k8s.io/minikube/pkg/minikube/config"
+"k8s.io/minikube/pkg/minikube/constants"
 "k8s.io/minikube/pkg/minikube/driver"
 "k8s.io/minikube/pkg/minikube/exit"
 "k8s.io/minikube/pkg/minikube/machine"
 )
 
-const (
-envTmpl = `{{ .Prefix }}DOCKER_TLS_VERIFY{{ .Delimiter }}{{ .DockerTLSVerify }}{{ .Suffix }}{{ .Prefix }}DOCKER_HOST{{ .Delimiter }}{{ .DockerHost }}{{ .Suffix }}{{ .Prefix }}DOCKER_CERT_PATH{{ .Delimiter }}{{ .DockerCertPath }}{{ .Suffix }}{{ if .NoProxyVar }}{{ .Prefix }}{{ .NoProxyVar }}{{ .Delimiter }}{{ .NoProxyValue }}{{ .Suffix }}{{end}}{{ .UsageHint }}`
+var envTmpl = fmt.Sprintf("{{ .Prefix }}%s{{ .Delimiter }}{{ .DockerTLSVerify }}{{ .Suffix }}{{ .Prefix }}%s{{ .Delimiter }}{{ .DockerHost }}{{ .Suffix }}{{ .Prefix }}%s{{ .Delimiter }}{{ .DockerCertPath }}{{ .Suffix }}{{ .Prefix }}%s{{ .Delimiter }}{{ .MinikubeDockerdProfile }}{{ .Suffix }}{{ if .NoProxyVar }}{{ .Prefix }}{{ .NoProxyVar }}{{ .Delimiter }}{{ .NoProxyValue }}{{ .Suffix }}{{end}}{{ .UsageHint }}", constants.DockerTLSVerifyEnv, constants.DockerHostEnv, constants.DockerCertPathEnv, constants.MinikubeActiveDockerdEnv)
 
+const (
 fishSetPfx = "set -gx "
 fishSetSfx = "\";\n"
 fishSetDelim = " \""
@@ -89,35 +90,18 @@ const (
 noneDelim = "="
 )
 
-var usageHintMap = map[string]string{
-"bash": `# Run this command to configure your shell:
-# eval $(minikube docker-env)
-`,
-"fish": `# Run this command to configure your shell:
-# eval (minikube docker-env)
-`,
-"powershell": `# Run this command to configure your shell:
-# & minikube docker-env | Invoke-Expression
-`,
-"cmd": `REM Run this command to configure your shell:
-REM @FOR /f "tokens=*" %i IN ('minikube docker-env') DO @%i
-`,
-"emacs": `;; Run this command to configure your shell:
-;; (with-temp-buffer (shell-command "minikube docker-env" (current-buffer)) (eval-buffer))
-`,
-}
-
 // ShellConfig represents the shell config
 type ShellConfig struct {
 Prefix string
 Delimiter string
 Suffix string
 DockerCertPath string
 DockerHost string
 DockerTLSVerify string
-UsageHint string
-NoProxyVar string
-NoProxyValue string
+MinikubeDockerdProfile string
+UsageHint string
+NoProxyVar string
+NoProxyValue string
 }
 
 var (
@@ -144,7 +128,32 @@ type NoProxyGetter interface {
 // EnvNoProxyGetter gets the no_proxy variable, using environment
 type EnvNoProxyGetter struct{}
 
-func generateUsageHint(userShell string) string {
+func generateUsageHint(profile string, userShell string) string {
+const usgPlz = "Please run command below to point your shell to minikube's docker-daemon :"
+var usgCmd = fmt.Sprintf("minikube -p %s docker-env", profile)
+var usageHintMap = map[string]string{
+"bash": fmt.Sprintf(`
+# %s
+# eval $(%s)
+`, usgPlz, usgCmd),
+"fish": fmt.Sprintf(`
+# %s
+# eval (%s)
+`, usgPlz, usgCmd),
+"powershell": fmt.Sprintf(`
+# %s
+# & %s | Invoke-Expression
+`, usgPlz, usgCmd),
+"cmd": fmt.Sprintf(`
+REM %s
+REM @FOR /f "tokens=*" %%i IN ('%s') DO @%%i
+`, usgPlz, usgCmd),
+"emacs": fmt.Sprintf(`
+;; %s
+;; (with-temp-buffer (shell-command "%s" (current-buffer)) (eval-buffer))
+`, usgPlz, usgCmd),
+}
+
 hint, ok := usageHintMap[userShell]
 if !ok {
 return usageHintMap["bash"]
@@ -154,7 +163,7 @@ func generateUsageHint(userShell string) string {
 
 func shellCfgSet(api libmachine.API) (*ShellConfig, error) {
 
-envMap, err := cluster.GetHostDockerEnv(api)
+envMap, err := cluster.GetNodeDockerEnv(api)
 if err != nil {
 return nil, err
 }
@@ -165,10 +174,11 @@ func shellCfgSet(api libmachine.API) (*ShellConfig, error) {
 }
 
 shellCfg := &ShellConfig{
-DockerCertPath: envMap["DOCKER_CERT_PATH"],
-DockerHost: envMap["DOCKER_HOST"],
-DockerTLSVerify: envMap["DOCKER_TLS_VERIFY"],
-UsageHint: generateUsageHint(userShell),
+DockerCertPath: envMap[constants.DockerCertPathEnv],
+DockerHost: envMap[constants.DockerHostEnv],
+DockerTLSVerify: envMap[constants.DockerTLSVerifyEnv],
+MinikubeDockerdProfile: envMap[constants.MinikubeActiveDockerdEnv],
+UsageHint: generateUsageHint(viper.GetString(config.MachineProfile), userShell),
 }
 
 if noProxy {
@@ -237,7 +247,7 @@ func shellCfgUnset() (*ShellConfig, error) {
 }
 
 shellCfg := &ShellConfig{
-UsageHint: generateUsageHint(userShell),
+UsageHint: generateUsageHint(viper.GetString(config.MachineProfile), userShell),
 }
 
 if noProxy {
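Note: a quick sketch, not part of the patch, of what the new per-profile hint renders to; the profile name "p1" is only an example value.

// Sketch: rendering the bash hint for a hypothetical profile named "p1".
hint := generateUsageHint("p1", "bash")
fmt.Print(hint)
// Expected shape, following the template above:
// # Please run command below to point your shell to minikube's docker-daemon :
// # eval $(minikube -p p1 docker-env)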
@@ -59,13 +59,14 @@ var defaultAPI = &tests.MockAPI{
 // Most of the shell cfg isn't configurable
 func newShellCfg(shell, prefix, suffix, delim string) *ShellConfig {
 return &ShellConfig{
 DockerCertPath: localpath.MakeMiniPath("certs"),
 DockerTLSVerify: "1",
 DockerHost: "tcp://127.0.0.1:2376",
-UsageHint: generateUsageHint(shell),
+UsageHint: generateUsageHint("minikube", shell),
 Prefix: prefix,
 Suffix: suffix,
 Delimiter: delim,
+MinikubeDockerdProfile: "minikube",
 }
 }
 
@@ -141,15 +142,16 @@ func TestShellCfgSet(t *testing.T) {
 noProxyValue: "",
 noProxyFlag: true,
 expectedShellCfg: &ShellConfig{
 DockerCertPath: localpath.MakeMiniPath("certs"),
 DockerTLSVerify: "1",
 DockerHost: "tcp://127.0.0.1:2376",
-UsageHint: usageHintMap["bash"],
+UsageHint: generateUsageHint("minikube", "bash"),
 Prefix: bashSetPfx,
 Suffix: bashSetSfx,
 Delimiter: bashSetDelim,
 NoProxyVar: "NO_PROXY",
 NoProxyValue: "127.0.0.1",
+MinikubeDockerdProfile: "minikube",
 },
 },
 {
@@ -160,15 +162,16 @@ func TestShellCfgSet(t *testing.T) {
 noProxyValue: "",
 noProxyFlag: true,
 expectedShellCfg: &ShellConfig{
 DockerCertPath: localpath.MakeMiniPath("certs"),
 DockerTLSVerify: "1",
 DockerHost: "tcp://127.0.0.1:2376",
-UsageHint: usageHintMap["bash"],
+UsageHint: generateUsageHint("minikube", "bash"),
 Prefix: bashSetPfx,
 Suffix: bashSetSfx,
 Delimiter: bashSetDelim,
 NoProxyVar: "no_proxy",
 NoProxyValue: "127.0.0.1",
+MinikubeDockerdProfile: "minikube",
 },
 },
 {
@@ -179,15 +182,16 @@ func TestShellCfgSet(t *testing.T) {
 noProxyValue: "127.0.0.1",
 noProxyFlag: true,
 expectedShellCfg: &ShellConfig{
 DockerCertPath: localpath.MakeMiniPath("certs"),
 DockerTLSVerify: "1",
 DockerHost: "tcp://127.0.0.1:2376",
-UsageHint: usageHintMap["bash"],
+UsageHint: generateUsageHint("minikube", "bash"),
 Prefix: bashSetPfx,
 Suffix: bashSetSfx,
 Delimiter: bashSetDelim,
 NoProxyVar: "no_proxy",
 NoProxyValue: "127.0.0.1",
+MinikubeDockerdProfile: "minikube",
 },
 },
 {
@@ -198,15 +202,16 @@ func TestShellCfgSet(t *testing.T) {
 noProxyValue: "0.0.0.0",
 noProxyFlag: true,
 expectedShellCfg: &ShellConfig{
 DockerCertPath: localpath.MakeMiniPath("certs"),
 DockerTLSVerify: "1",
 DockerHost: "tcp://127.0.0.1:2376",
-UsageHint: usageHintMap["bash"],
+UsageHint: generateUsageHint("minikube", "bash"),
 Prefix: bashSetPfx,
 Suffix: bashSetSfx,
 Delimiter: bashSetDelim,
 NoProxyVar: "no_proxy",
 NoProxyValue: "0.0.0.0,127.0.0.1",
+MinikubeDockerdProfile: "minikube",
 },
 },
 {
@@ -217,15 +222,16 @@ func TestShellCfgSet(t *testing.T) {
 noProxyValue: "0.0.0.0,127.0.0.1",
 noProxyFlag: true,
 expectedShellCfg: &ShellConfig{
 DockerCertPath: localpath.MakeMiniPath("certs"),
 DockerTLSVerify: "1",
 DockerHost: "tcp://127.0.0.1:2376",
-UsageHint: usageHintMap["bash"],
+UsageHint: generateUsageHint("minikube", "bash"),
 Prefix: bashSetPfx,
 Suffix: bashSetSfx,
 Delimiter: bashSetDelim,
 NoProxyVar: "no_proxy",
 NoProxyValue: "0.0.0.0,127.0.0.1",
+MinikubeDockerdProfile: "minikube",
 },
 },
 }
@@ -254,6 +260,7 @@ func TestShellCfgSet(t *testing.T) {
 }
 
 func TestShellCfgUnset(t *testing.T) {
 
 var tests = []struct {
 description string
 shell string
@@ -266,7 +273,7 @@ func TestShellCfgUnset(t *testing.T) {
 Prefix: bashUnsetPfx,
 Suffix: bashUnsetSfx,
 Delimiter: bashUnsetDelim,
-UsageHint: usageHintMap["bash"],
+UsageHint: generateUsageHint("minikube", "bash"),
 },
 },
 {
@@ -276,7 +283,7 @@ func TestShellCfgUnset(t *testing.T) {
 Prefix: bashUnsetPfx,
 Suffix: bashUnsetSfx,
 Delimiter: bashUnsetDelim,
-UsageHint: usageHintMap["bash"],
+UsageHint: generateUsageHint("minikube", "bash"),
 },
 },
 {
@@ -286,7 +293,7 @@ func TestShellCfgUnset(t *testing.T) {
 Prefix: fishUnsetPfx,
 Suffix: fishUnsetSfx,
 Delimiter: fishUnsetDelim,
-UsageHint: usageHintMap["fish"],
+UsageHint: generateUsageHint("minikube", "fish"),
 },
 },
 {
@@ -296,7 +303,7 @@ func TestShellCfgUnset(t *testing.T) {
 Prefix: psUnsetPfx,
 Suffix: psUnsetSfx,
 Delimiter: psUnsetDelim,
-UsageHint: usageHintMap["powershell"],
+UsageHint: generateUsageHint("minikube", "powershell"),
 },
 },
 {
@@ -306,7 +313,7 @@ func TestShellCfgUnset(t *testing.T) {
 Prefix: cmdUnsetPfx,
 Suffix: cmdUnsetSfx,
 Delimiter: cmdUnsetDelim,
-UsageHint: usageHintMap["cmd"],
+UsageHint: generateUsageHint("minikube", "cmd"),
 },
 },
 {
@@ -316,7 +323,7 @@ func TestShellCfgUnset(t *testing.T) {
 Prefix: emacsUnsetPfx,
 Suffix: emacsUnsetSfx,
 Delimiter: emacsUnsetDelim,
-UsageHint: usageHintMap["emacs"],
+UsageHint: generateUsageHint("minikube", "emacs"),
 },
 },
 }
@@ -27,7 +27,6 @@ import (
 "github.com/spf13/cobra"
 "github.com/spf13/viper"
 "k8s.io/minikube/pkg/minikube/config"
-pkg_config "k8s.io/minikube/pkg/minikube/config"
 "k8s.io/minikube/pkg/minikube/constants"
 "k8s.io/minikube/pkg/minikube/exit"
 "k8s.io/minikube/pkg/minikube/machine"
@@ -38,10 +37,11 @@ import (
 var kubectlCmd = &cobra.Command{
 Use: "kubectl",
 Short: "Run kubectl",
-Long: `Run the kubernetes client, download it if necessary.
+Long: `Run the kubernetes client, download it if necessary. Remember -- after kubectl!
 
 Examples:
 minikube kubectl -- --help
-kubectl get pods --namespace kube-system`,
+minikube kubectl -- get pods --namespace kube-system`,
 Run: func(cmd *cobra.Command, args []string) {
 api, err := machine.NewAPIClient()
 if err != nil {
@@ -50,8 +50,8 @@ kubectl get pods --namespace kube-system`,
 }
 defer api.Close()
 
-cc, err := pkg_config.Load(viper.GetString(config.MachineProfile))
-if err != nil && !os.IsNotExist(err) {
+cc, err := config.Load(viper.GetString(config.MachineProfile))
+if err != nil && !config.IsNotExist(err) {
 out.ErrLn("Error loading profile config: %v", err)
 }
 
@@ -108,7 +108,6 @@ var mountCmd = &cobra.Command{
 exit.WithError("Error getting config", err)
 }
 host, err := api.Load(cc.Name)
-
 if err != nil {
 exit.WithError("Error loading api", err)
 }
@@ -0,0 +1,103 @@
+/*
+Copyright 2020 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+"os"
+"strings"
+
+"github.com/golang/glog"
+"github.com/spf13/cobra"
+"github.com/spf13/viper"
+
+"k8s.io/minikube/pkg/minikube/cluster"
+"k8s.io/minikube/pkg/minikube/config"
+"k8s.io/minikube/pkg/minikube/cruntime"
+"k8s.io/minikube/pkg/minikube/exit"
+"k8s.io/minikube/pkg/minikube/machine"
+"k8s.io/minikube/pkg/minikube/out"
+)
+
+var (
+namespaces []string
+allNamespaces bool
+)
+
+// pauseCmd represents the pause command
+var pauseCmd = &cobra.Command{
+Use: "pause",
+Short: "pause containers",
+Run: runPause,
+}
+
+func runPause(cmd *cobra.Command, args []string) {
+cname := viper.GetString(config.MachineProfile)
+api, err := machine.NewAPIClient()
+if err != nil {
+exit.WithError("Error getting client", err)
+}
+defer api.Close()
+cc, err := config.Load(cname)
+
+if err != nil && !config.IsNotExist(err) {
+exit.WithError("Error loading profile config", err)
+}
+
+if err != nil {
+out.ErrT(out.Meh, `"{{.name}}" profile does not exist`, out.V{"name": cname})
+os.Exit(1)
+}
+
+glog.Infof("config: %+v", cc)
+host, err := cluster.CheckIfHostExistsAndLoad(api, cname)
+if err != nil {
+exit.WithError("Error getting host", err)
+}
+
+r, err := machine.CommandRunner(host)
+if err != nil {
+exit.WithError("Failed to get command runner", err)
+}
+
+cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: r})
+if err != nil {
+exit.WithError("Failed runtime", err)
+}
+
+glog.Infof("namespaces: %v keys: %v", namespaces, viper.AllSettings())
+if allNamespaces {
+namespaces = nil // all
+} else if len(namespaces) == 0 {
+exit.WithCodeT(exit.BadUsage, "Use -A to specify all namespaces")
+}
+
+ids, err := cluster.Pause(cr, r, namespaces)
+if err != nil {
+exit.WithError("Pause", err)
+}
+
+if namespaces == nil {
+out.T(out.Unpause, "Paused kubelet and {{.count}} containers", out.V{"count": len(ids)})
+} else {
+out.T(out.Unpause, "Paused kubelet and {{.count}} containers in: {{.namespaces}}", out.V{"count": len(ids), "namespaces": strings.Join(namespaces, ", ")})
+}
+}
+
+func init() {
+pauseCmd.Flags().StringSliceVarP(&namespaces, "namespaces", "n", cluster.DefaultNamespaces, "namespaces to pause")
+pauseCmd.Flags().BoolVarP(&allNamespaces, "all-namespaces", "A", false, "If set, pause all namespaces")
+}
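Note: a short sketch, not part of the patch, of how the new pause command's flags are meant to combine; the CLI invocations are assumptions based on the flag definitions above.

// Assumed behaviour of the flags registered in init() above:
//   minikube pause -A               -> allNamespaces=true, namespaces=nil, pause everything
//   minikube pause -n kube-system   -> pause only containers in the kube-system namespace
//   minikube pause                  -> namespaces defaults to cluster.DefaultNamespaces
// A nil namespace slice is passed straight to cluster.Pause, which the code treats as "all".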
@@ -32,7 +32,6 @@ import (
 "k8s.io/kubectl/pkg/util/templates"
 configCmd "k8s.io/minikube/cmd/minikube/cmd/config"
 "k8s.io/minikube/pkg/minikube/bootstrapper"
-"k8s.io/minikube/pkg/minikube/bootstrapper/kicbs"
 "k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm"
 "k8s.io/minikube/pkg/minikube/config"
 "k8s.io/minikube/pkg/minikube/constants"
@@ -173,6 +172,8 @@ func init() {
 stopCmd,
 deleteCmd,
 dashboardCmd,
+pauseCmd,
+unpauseCmd,
 },
 },
 {
@@ -274,16 +275,9 @@ func getClusterBootstrapper(api libmachine.API, bootstrapperName string) (bootst
 if err != nil {
 return nil, errors.Wrap(err, "getting a new kubeadm bootstrapper")
 }
-case bootstrapper.KIC:
-b, err = kicbs.NewBootstrapper(api)
-if err != nil {
-return nil, errors.Wrap(err, "getting a new kic bootstrapper")
-}
-
 default:
 return nil, fmt.Errorf("unknown bootstrapper: %s", bootstrapperName)
 }
 
 return b, nil
 }
 
@@ -25,8 +25,10 @@ import (
 "github.com/golang/glog"
 "github.com/pkg/browser"
 "github.com/spf13/cobra"
+"github.com/spf13/viper"
 
 "k8s.io/minikube/pkg/minikube/cluster"
+pkg_config "k8s.io/minikube/pkg/minikube/config"
 "k8s.io/minikube/pkg/minikube/exit"
 "k8s.io/minikube/pkg/minikube/machine"
 "k8s.io/minikube/pkg/minikube/out"
@@ -71,7 +73,8 @@ var serviceCmd = &cobra.Command{
 }
 defer api.Close()
 
-if !cluster.IsMinikubeRunning(api) {
+profileName := viper.GetString(pkg_config.MachineProfile)
+if !cluster.IsHostRunning(api, profileName) {
 os.Exit(1)
 }
 
@@ -46,14 +46,13 @@ import (
 "github.com/spf13/viper"
 "golang.org/x/sync/errgroup"
 cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
-pkgaddons "k8s.io/minikube/pkg/addons"
+"k8s.io/minikube/pkg/addons"
 "k8s.io/minikube/pkg/minikube/bootstrapper"
 "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
 "k8s.io/minikube/pkg/minikube/bootstrapper/images"
 "k8s.io/minikube/pkg/minikube/cluster"
 "k8s.io/minikube/pkg/minikube/command"
 "k8s.io/minikube/pkg/minikube/config"
-cfg "k8s.io/minikube/pkg/minikube/config"
 "k8s.io/minikube/pkg/minikube/constants"
 "k8s.io/minikube/pkg/minikube/cruntime"
 "k8s.io/minikube/pkg/minikube/driver"
@@ -65,6 +64,7 @@ import (
 "k8s.io/minikube/pkg/minikube/notify"
 "k8s.io/minikube/pkg/minikube/out"
 "k8s.io/minikube/pkg/minikube/proxy"
+"k8s.io/minikube/pkg/minikube/registry"
 "k8s.io/minikube/pkg/minikube/translate"
 pkgutil "k8s.io/minikube/pkg/util"
 "k8s.io/minikube/pkg/util/lock"
@@ -104,7 +104,6 @@ const (
 imageMirrorCountry = "image-mirror-country"
 mountString = "mount-string"
 disableDriverMounts = "disable-driver-mounts"
-addons = "addons"
 cacheImages = "cache-images"
 uuid = "uuid"
 vpnkitSock = "hyperkit-vpnkit-sock"
@@ -136,7 +135,7 @@ var (
 apiServerNames []string
 addonList []string
 apiServerIPs []net.IP
-extraOptions cfg.ExtraOptionSlice
+extraOptions config.ExtraOptionSlice
 )
 
 func init() {
@@ -159,7 +158,7 @@ func initMinikubeFlags() {
 
 startCmd.Flags().Bool(force, false, "Force minikube to perform possibly dangerous operations")
 startCmd.Flags().Bool(interactive, true, "Allow user prompts for more information")
-startCmd.Flags().Bool(dryRun, false, "dry-run mode. Validates configuration, but does does not mutate system state")
+startCmd.Flags().Bool(dryRun, false, "dry-run mode. Validates configuration, but does not mutate system state")
 
 startCmd.Flags().Int(cpus, 2, "Number of CPUs allocated to the minikube VM.")
 startCmd.Flags().String(memory, defaultMemorySize, "Amount of RAM allocated to the minikube VM (format: <number>[<unit>], where unit = b, k, m or g).")
@@ -172,7 +171,7 @@ func initMinikubeFlags() {
 startCmd.Flags().String(containerRuntime, "docker", "The container runtime to be used (docker, crio, containerd).")
 startCmd.Flags().Bool(createMount, false, "This will start the mount daemon and automatically mount files into minikube.")
 startCmd.Flags().String(mountString, constants.DefaultMountDir+":/minikube-host", "The argument to pass the minikube mount command on start.")
-startCmd.Flags().StringArrayVar(&addonList, addons, nil, "Enable addons. see `minikube addons list` for a list of valid addon names.")
+startCmd.Flags().StringArrayVar(&addonList, "addons", nil, "Enable addons. see `minikube addons list` for a list of valid addon names.")
 startCmd.Flags().String(criSocket, "", "The cri socket path to be used.")
 startCmd.Flags().String(networkPlugin, "", "The name of the network plugin.")
 startCmd.Flags().Bool(enableDefaultCNI, false, "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \"--network-plugin=cni\".")
@@ -200,7 +199,7 @@ func initKubernetesFlags() {
 
 // initDriverFlags inits the commandline flags for vm drivers
 func initDriverFlags() {
-startCmd.Flags().String("vm-driver", "", fmt.Sprintf("Driver is one of: %v (defaults to auto-detect)", driver.SupportedDrivers()))
+startCmd.Flags().String("vm-driver", "", fmt.Sprintf("Driver is one of: %v (defaults to auto-detect)", driver.DisplaySupportedDrivers()))
 startCmd.Flags().Bool(disableDriverMounts, false, "Disables the filesystem mounts provided by the hypervisors")
 
 // kvm2
@@ -295,8 +294,8 @@ func runStart(cmd *cobra.Command, args []string) {
 registryMirror = viper.GetStringSlice("registry_mirror")
 }
 
-existing, err := cfg.Load(viper.GetString(config.MachineProfile))
-if err != nil && !os.IsNotExist(err) {
+existing, err := config.Load(viper.GetString(config.MachineProfile))
+if err != nil && !config.IsNotExist(err) {
 exit.WithCodeT(exit.Data, "Unable to load config: {{.error}}", out.V{"error": err})
 }
 
@@ -317,7 +316,7 @@ func runStart(cmd *cobra.Command, args []string) {
 }
 
 k8sVersion, isUpgrade := getKubernetesVersion(existing)
-config, err := generateCfgFromFlags(cmd, k8sVersion, driverName)
+mc, n, err := generateCfgFromFlags(cmd, k8sVersion, driverName)
 if err != nil {
 exit.WithError("Failed to generate config", err)
 }
@@ -328,7 +327,7 @@ func runStart(cmd *cobra.Command, args []string) {
 return
 }
 
-cacheISO(&config, driverName)
+cacheISO(&mc, driverName)
 
 if viper.GetBool(nativeSSH) {
 ssh.SetDefaultClient(ssh.Native)
@@ -338,38 +337,42 @@ func runStart(cmd *cobra.Command, args []string) {
 
 // Now that the ISO is downloaded, pull images in the background while the VM boots.
 var cacheGroup errgroup.Group
-beginCacheRequiredImages(&cacheGroup, config.KubernetesConfig.ImageRepository, k8sVersion)
+beginCacheRequiredImages(&cacheGroup, mc.KubernetesConfig.ImageRepository, k8sVersion)
 
 // Abstraction leakage alert: startHost requires the config to be saved, to satisfy pkg/provision/buildroot.
 // Hence, saveConfig must be called before startHost, and again afterwards when we know the IP.
-if err := saveConfig(&config); err != nil {
+if err := saveConfig(&mc); err != nil {
 exit.WithError("Failed to save config", err)
 }
 
 // exits here in case of --download-only option.
 handleDownloadOnly(&cacheGroup, k8sVersion)
-mRunner, preExists, machineAPI, host := startMachine(&config)
+mRunner, preExists, machineAPI, host := startMachine(&mc, &n)
 defer machineAPI.Close()
 // configure the runtime (docker, containerd, crio)
-cr := configureRuntimes(mRunner, driverName, config.KubernetesConfig)
+cr := configureRuntimes(mRunner, driverName, mc.KubernetesConfig)
 showVersionInfo(k8sVersion, cr)
 waitCacheRequiredImages(&cacheGroup)
 
 // Must be written before bootstrap, otherwise health checks may flake due to stale IP
-kubeconfig, err := setupKubeconfig(host, &config, config.Name)
+kubeconfig, err := setupKubeconfig(host, &mc, &n, mc.Name)
 if err != nil {
 exit.WithError("Failed to setup kubeconfig", err)
 }
 
 // setup kubeadm (must come after setupKubeconfig)
-bs := setupKubeAdm(machineAPI, config)
+bs := setupKubeAdm(machineAPI, mc, n)
 
 // pull images or restart cluster
-bootstrapCluster(bs, cr, mRunner, config.KubernetesConfig, preExists, isUpgrade)
+bootstrapCluster(bs, cr, mRunner, mc, preExists, isUpgrade)
 configureMounts()
 
-// enable addons with start command
-enableAddons()
+// enable addons, both old and new!
+existingAddons := map[string]bool{}
+if existing != nil && existing.Addons != nil {
+existingAddons = existing.Addons
+}
+addons.Start(viper.GetString(config.MachineProfile), existingAddons, addonList)
 
 if err = cacheAndLoadImagesInConfig(); err != nil {
 out.T(out.FailureType, "Unable to load cached images from config file.")
@@ -382,11 +385,11 @@ func runStart(cmd *cobra.Command, args []string) {
 
 // Skip pre-existing, because we already waited for health
 if viper.GetBool(waitUntilHealthy) && !preExists {
-if err := bs.WaitForCluster(config.KubernetesConfig, viper.GetDuration(waitTimeout)); err != nil {
+if err := bs.WaitForCluster(mc, viper.GetDuration(waitTimeout)); err != nil {
 exit.WithError("Wait failed", err)
 }
 }
-if err := showKubectlInfo(kubeconfig, k8sVersion, config.Name); err != nil {
+if err := showKubectlInfo(kubeconfig, k8sVersion, mc.Name); err != nil {
 glog.Errorf("kubectl info: %v", err)
 }
 }
@@ -400,27 +403,18 @@ func updateDriver(driverName string) {
 }
 }
 
-func cacheISO(config *cfg.MachineConfig, driverName string) {
+func cacheISO(cfg *config.MachineConfig, driverName string) {
 if !driver.BareMetal(driverName) && !driver.IsKIC(driverName) {
-if err := cluster.CacheISO(*config); err != nil {
+if err := cluster.CacheISO(*cfg); err != nil {
 exit.WithError("Failed to cache ISO", err)
 }
 }
 }
 
-func enableAddons() {
-for _, a := range addonList {
-err := pkgaddons.Set(a, "true", viper.GetString(config.MachineProfile))
-if err != nil {
-exit.WithError("addon enable failed", err)
-}
-}
-}
-
 func displayVersion(version string) {
 prefix := ""
-if viper.GetString(cfg.MachineProfile) != constants.DefaultMachineName {
-prefix = fmt.Sprintf("[%s] ", viper.GetString(cfg.MachineProfile))
+if viper.GetString(config.MachineProfile) != constants.DefaultMachineName {
+prefix = fmt.Sprintf("[%s] ", viper.GetString(config.MachineProfile))
 }
 
 versionState := out.Happy
@@ -443,22 +437,18 @@ func displayEnviron(env []string) {
 }
 }
 
-func setupKubeconfig(h *host.Host, c *cfg.MachineConfig, clusterName string) (*kubeconfig.Settings, error) {
-addr := ""
-var err error
-if driver.IsKIC(h.DriverName) {
-addr = fmt.Sprintf("https://%s", net.JoinHostPort("127.0.0.1", fmt.Sprint(c.KubernetesConfig.NodePort)))
-} else {
-addr, err = h.Driver.GetURL()
-if err != nil {
-exit.WithError("Failed to get driver URL", err)
-}
+func setupKubeconfig(h *host.Host, c *config.MachineConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) {
+addr, err := h.Driver.GetURL()
+if err != nil {
+exit.WithError("Failed to get driver URL", err)
+}
+if !driver.IsKIC(h.DriverName) {
 addr = strings.Replace(addr, "tcp://", "https://", -1)
-addr = strings.Replace(addr, ":2376", ":"+strconv.Itoa(c.KubernetesConfig.NodePort), -1)
+addr = strings.Replace(addr, ":2376", ":"+strconv.Itoa(n.Port), -1)
 }
 
 if c.KubernetesConfig.APIServerName != constants.APIServerName {
-addr = strings.Replace(addr, c.KubernetesConfig.NodeIP, c.KubernetesConfig.APIServerName, -1)
+addr = strings.Replace(addr, n.IP, c.KubernetesConfig.APIServerName, -1)
 }
 kcs := &kubeconfig.Settings{
 ClusterName: clusterName,
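Note: a minimal sketch, not part of the patch, of the address rewrite the non-KIC branch above performs; the IP and node port are made-up example values.

// Example values only: a libmachine URL rewritten into the API server URL.
addr := "tcp://192.168.99.101:2376"
addr = strings.Replace(addr, "tcp://", "https://", -1)            // https://192.168.99.101:2376
addr = strings.Replace(addr, ":2376", ":"+strconv.Itoa(8443), -1) // https://192.168.99.101:8443 (n.Port)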
@@ -494,12 +484,12 @@ func handleDownloadOnly(cacheGroup *errgroup.Group, k8sVersion string) {
 
 }
 
-func startMachine(config *cfg.MachineConfig) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) {
+func startMachine(cfg *config.MachineConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) {
 m, err := machine.NewAPIClient()
 if err != nil {
 exit.WithError("Failed to get machine client", err)
 }
-host, preExists = startHost(m, *config)
+host, preExists = startHost(m, *cfg)
 runner, err = machine.CommandRunner(host)
 if err != nil {
 exit.WithError("Failed to get command runner", err)
@@ -513,8 +503,9 @@ func startMachine(config *cfg.MachineConfig) (runner command.Runner, preExists b
 out.ErrT(out.FailureType, "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip})
 }
 // Save IP to configuration file for subsequent use
-config.KubernetesConfig.NodeIP = ip
-if err := saveConfig(config); err != nil {
+node.IP = ip
+
+if err := saveNodeToConfig(cfg, node); err != nil {
 exit.WithError("Failed to save config", err)
 }
 
@@ -576,30 +567,37 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st
 return nil
 }
 
-func selectDriver(existing *cfg.MachineConfig) string {
+func selectDriver(existing *config.MachineConfig) string {
 name := viper.GetString("vm-driver")
 glog.Infof("selectDriver: flag=%q, old=%v", name, existing)
 
 driver.SetLibvirtURI(viper.GetString(kvmQemuURI))
 options := driver.Choices()
 pick, alts := driver.Choose(name, options)
+exp := ""
+if pick.Priority == registry.Experimental {
+exp = "experimental "
+}
+
 if name != "" {
-out.T(out.Sparkle, `Selecting '{{.driver}}' driver from user configuration (alternates: {{.alternates}})`, out.V{"driver": name, "alternates": alts})
+out.T(out.Sparkle, `Selecting {{.experimental}}'{{.driver}}' driver from user configuration (alternates: {{.alternates}})`, out.V{"experimental": exp, "driver": name, "alternates": alts})
 return name
 }
 
 // By default, the driver is whatever we used last time
 if existing != nil {
 pick, alts := driver.Choose(existing.VMDriver, options)
-out.T(out.Sparkle, `Selecting '{{.driver}}' driver from existing profile (alternates: {{.alternates}})`, out.V{"driver": existing.VMDriver, "alternates": alts})
+if pick.Priority == registry.Experimental {
+exp = "experimental "
+}
+out.T(out.Sparkle, `Selecting {{.experimental}}'{{.driver}}' driver from existing profile (alternates: {{.alternates}})`, out.V{"experimental": exp, "driver": existing.VMDriver, "alternates": alts})
 return pick.Name
 }
 
 if len(options) > 1 {
-out.T(out.Sparkle, `Automatically selected the '{{.driver}}' driver (alternates: {{.alternates}})`, out.V{"driver": pick.Name, "alternates": alts})
+out.T(out.Sparkle, `Automatically selected the {{.experimental}}'{{.driver}}' driver (alternates: {{.alternates}})`, out.V{"experimental": exp, "driver": pick.Name, "alternates": alts})
 } else {
-out.T(out.Sparkle, `Automatically selected the '{{.driver}}' driver`, out.V{"driver": pick.Name})
+out.T(out.Sparkle, `Automatically selected the {{.experimental}}'{{.driver}}' driver`, out.V{"experimental": exp, "driver": pick.Name})
 }
 
 if pick.Name == "" {
@@ -609,10 +607,10 @@ func selectDriver(existing *cfg.MachineConfig) string {
 }
 
 // validateDriver validates that the selected driver appears sane, exits if not
-func validateDriver(name string, existing *cfg.MachineConfig) {
+func validateDriver(name string, existing *config.MachineConfig) {
 glog.Infof("validating driver %q against %+v", name, existing)
 if !driver.Supported(name) {
-exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS})
+exit.WithCodeT(exit.Unavailable, "The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS})
 }
 
 st := driver.Status(name)
@@ -646,7 +644,7 @@ func validateDriver(name string, existing *cfg.MachineConfig) {
 return
 }
 
-machineName := viper.GetString(cfg.MachineProfile)
+machineName := viper.GetString(config.MachineProfile)
 h, err := api.Load(machineName)
 if err != nil {
 glog.Warningf("selectDriver api.Load: %v", err)
@@ -725,8 +723,8 @@ func selectImageRepository(mirrorCountry string) (bool, string, error) {
 
 // Return a minikube command containing the current profile name
 func minikubeCmd() string {
-if viper.GetString(cfg.MachineProfile) != constants.DefaultMachineName {
-return fmt.Sprintf("minikube -p %s", cfg.MachineProfile)
+if viper.GetString(config.MachineProfile) != constants.DefaultMachineName {
+return fmt.Sprintf("minikube -p %s", config.MachineProfile)
 }
 return "minikube"
 }
@@ -756,8 +754,8 @@ func validateUser(drvName string) {
 if !useForce {
 os.Exit(exit.Permissions)
 }
-_, err = cfg.Load(viper.GetString(config.MachineProfile))
-if err == nil || !os.IsNotExist(err) {
+_, err = config.Load(viper.GetString(config.MachineProfile))
+if err == nil || !config.IsNotExist(err) {
 out.T(out.Tip, "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete", out.V{"cmd": minikubeCmd()})
 }
 if !useForce {
@@ -810,7 +808,7 @@ func validateFlags(cmd *cobra.Command, drvName string) {
 validateMemorySize()
 
 if driver.BareMetal(drvName) {
-if viper.GetString(cfg.MachineProfile) != constants.DefaultMachineName {
+if viper.GetString(config.MachineProfile) != constants.DefaultMachineName {
 exit.WithCodeT(exit.Config, "The 'none' driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/")
 }
 
@@ -831,8 +829,8 @@ func validateFlags(cmd *cobra.Command, drvName string) {
 
 // check that kubeadm extra args contain only whitelisted parameters
 for param := range extraOptions.AsMap().Get(bsutil.Kubeadm) {
-if !cfg.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmCmdParam], param) &&
-!cfg.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmConfigParam], param) {
+if !config.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmCmdParam], param) &&
+!config.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmConfigParam], param) {
 exit.UsageT("Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config", out.V{"parameter_name": param})
 }
 }
@@ -884,11 +882,11 @@ func waitCacheRequiredImages(g *errgroup.Group) {
 }
 }
 
-// generateCfgFromFlags generates cfg.Config based on flags and supplied arguments
-func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) (cfg.MachineConfig, error) {
+// generateCfgFromFlags generates config.Config based on flags and supplied arguments
+func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) (config.MachineConfig, config.Node, error) {
 r, err := cruntime.New(cruntime.Config{Type: viper.GetString(containerRuntime)})
 if err != nil {
-return cfg.MachineConfig{}, err
+return config.MachineConfig{}, config.Node{}, err
 }
 
 // Pick good default values for --network-plugin and --enable-default-cni based on runtime.
@@ -929,8 +927,22 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
 out.T(out.SuccessType, "Using image repository {{.name}}", out.V{"name": repository})
 }
 
-cfg := cfg.MachineConfig{
-Name: viper.GetString(cfg.MachineProfile),
+var kubeNodeName string
+if drvName != driver.None {
+kubeNodeName = viper.GetString(config.MachineProfile)
+}
+
+// Create the initial node, which will necessarily be a control plane
+cp := config.Node{
+Port: viper.GetInt(apiServerPort),
+KubernetesVersion: k8sVersion,
+Name: kubeNodeName,
+ControlPlane: true,
+Worker: true,
+}
+
+cfg := config.MachineConfig{
+Name: viper.GetString(config.MachineProfile),
 KeepContext: viper.GetBool(keepContext),
 EmbedCerts: viper.GetBool(embedCerts),
 MinikubeISO: viper.GetString(isoURL),
@@ -938,7 +950,6 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
 CPUs: viper.GetInt(cpus),
 DiskSize: pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize)),
 VMDriver: drvName,
-ContainerRuntime: viper.GetString(containerRuntime),
 HyperkitVpnKitSock: viper.GetString(vpnkitSock),
 HyperkitVSockPorts: viper.GetStringSlice(vsockPorts),
 NFSShare: viper.GetStringSlice(nfsShare),
@@ -961,10 +972,9 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
 HostDNSResolver: viper.GetBool(hostDNSResolver),
 HostOnlyNicType: viper.GetString(hostOnlyNicType),
 NatNicType: viper.GetString(natNicType),
-KubernetesConfig: cfg.KubernetesConfig{
+KubernetesConfig: config.KubernetesConfig{
 KubernetesVersion: k8sVersion,
-NodePort: viper.GetInt(apiServerPort),
-NodeName: constants.DefaultNodeName,
+ClusterName: viper.GetString(config.MachineProfile),
 APIServerName: viper.GetString(apiServerName),
 APIServerNames: apiServerNames,
 APIServerIPs: apiServerIPs,
@@ -979,8 +989,9 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
 ShouldLoadCachedImages: viper.GetBool(cacheImages),
 EnableDefaultCNI: selectedEnableDefaultCNI,
 },
+Nodes: []config.Node{cp},
 }
-return cfg, nil
+return cfg, cp, nil
 }
 
 // setDockerProxy sets the proxy environment variables in the docker environment.
@@ -1036,7 +1047,7 @@ func autoSetDriverOptions(cmd *cobra.Command, drvName string) (err error) {
 // prepareNone prepares the user and host for the joy of the "none" driver
 func prepareNone() {
 out.T(out.StartingNone, "Configuring local host environment ...")
-if viper.GetBool(cfg.WantNoneDriverWarning) {
+if viper.GetBool(config.WantNoneDriverWarning) {
 out.T(out.Empty, "")
 out.WarningT("The 'none' driver provides limited isolation and may reduce system security and reliability.")
 out.WarningT("For more information, see:")
@@ -1063,7 +1074,7 @@ func prepareNone() {
 }
 
 // startHost starts a new minikube host using a VM or None
-func startHost(api libmachine.API, mc cfg.MachineConfig) (*host.Host, bool) {
+func startHost(api libmachine.API, mc config.MachineConfig) (*host.Host, bool) {
 exists, err := api.Exists(mc.Name)
 if err != nil {
 exit.WithError("Failed to check if machine exists", err)
@@ -1111,6 +1122,10 @@ func validateNetwork(h *host.Host, r command.Runner) string {
 }
 
 func trySSH(h *host.Host, ip string) {
+if viper.GetBool(force) {
+return
+}
+
 sshAddr := net.JoinHostPort(ip, "22")
 
 dial := func() (err error) {
@@ -1127,28 +1142,33 @@ func trySSH(h *host.Host, ip string) {
 if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil {
 exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}}
 
 This is likely due to one of two reasons:
 
 - VPN or firewall interference
 - {{.hypervisor}} network configuration issue
 
 Suggested workarounds:
 
 - Disable your local VPN or firewall software
 - Configure your local VPN or firewall to allow access to {{.ip}}
 - Restart or reinstall {{.hypervisor}}
-- Use an alternative --vm-driver`, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip})
+- Use an alternative --vm-driver
+- Use --force to override this connectivity check
+`, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip})
 }
 }
 
 func tryLookup(r command.Runner) {
 // DNS check
-if rr, err := r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil {
-glog.Warningf("%s failed: %v", rr.Args, err)
-out.WarningT("VM may be unable to resolve external DNS records")
+if rr, err := r.RunCmd(exec.Command("nslookup", "kubernetes.io", "-type=ns")); err != nil {
+glog.Infof("%s failed: %v; will retry nslookup without the query type", rr.Args, err)
+// retry without the query type, for ISOs shipping different busybox versions.
+if _, err = r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil {
+glog.Warningf("nslookup failed: %v", err)
+out.WarningT("Node may be unable to resolve external DNS records")
+}
 }
 }
 
 func tryRegistry(r command.Runner) {
 // Try an HTTPS connection to the image repository
 proxy := os.Getenv("HTTPS_PROXY")
@@ -1170,7 +1190,7 @@ func tryRegistry(r command.Runner) {
 }
 
 // getKubernetesVersion ensures that the requested version is reasonable
-func getKubernetesVersion(old *cfg.MachineConfig) (string, bool) {
+func getKubernetesVersion(old *config.MachineConfig) (string, bool) {
 paramVersion := viper.GetString(kubernetesVersion)
 isUpgrade := false
 
@@ -1240,7 +1260,7 @@ func getKubernetesVersion(old *cfg.MachineConfig) (string, bool) {
 }
 
 // setupKubeAdm adds any requested files into the VM before Kubernetes is started
-func setupKubeAdm(mAPI libmachine.API, config cfg.MachineConfig) bootstrapper.Bootstrapper {
+func setupKubeAdm(mAPI libmachine.API, cfg config.MachineConfig, node config.Node) bootstrapper.Bootstrapper {
 bs, err := getClusterBootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper))
 if err != nil {
 exit.WithError("Failed to get bootstrapper", err)
@@ -1249,17 +1269,17 @@ func setupKubeAdm(mAPI libmachine.API, config cfg.MachineConfig) bootstrapper.Bo
 out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value})
|
||||||
}
|
}
|
||||||
// Loads cached images, generates config files, download binaries
|
// Loads cached images, generates config files, download binaries
|
||||||
if err := bs.UpdateCluster(config); err != nil {
|
if err := bs.UpdateCluster(cfg); err != nil {
|
||||||
exit.WithError("Failed to update cluster", err)
|
exit.WithError("Failed to update cluster", err)
|
||||||
}
|
}
|
||||||
if err := bs.SetupCerts(config.KubernetesConfig); err != nil {
|
if err := bs.SetupCerts(cfg.KubernetesConfig, node); err != nil {
|
||||||
exit.WithError("Failed to setup certs", err)
|
exit.WithError("Failed to setup certs", err)
|
||||||
}
|
}
|
||||||
return bs
|
return bs
|
||||||
}
|
}
|
||||||
|
|
||||||
// configureRuntimes does what needs to happen to get a runtime going.
|
// configureRuntimes does what needs to happen to get a runtime going.
|
||||||
func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s cfg.KubernetesConfig) cruntime.Manager {
|
func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig) cruntime.Manager {
|
||||||
config := cruntime.Config{Type: viper.GetString(containerRuntime), Runner: runner, ImageRepository: k8s.ImageRepository, KubernetesVersion: k8s.KubernetesVersion}
|
config := cruntime.Config{Type: viper.GetString(containerRuntime), Runner: runner, ImageRepository: k8s.ImageRepository, KubernetesVersion: k8s.KubernetesVersion}
|
||||||
cr, err := cruntime.New(config)
|
cr, err := cruntime.New(config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -1279,16 +1299,16 @@ func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s cfg.Ku
|
||||||
}
|
}
|
||||||
|
|
||||||
// bootstrapCluster starts Kubernetes using the chosen bootstrapper
|
// bootstrapCluster starts Kubernetes using the chosen bootstrapper
|
||||||
func bootstrapCluster(bs bootstrapper.Bootstrapper, r cruntime.Manager, runner command.Runner, kc cfg.KubernetesConfig, preexisting bool, isUpgrade bool) {
|
func bootstrapCluster(bs bootstrapper.Bootstrapper, r cruntime.Manager, runner command.Runner, mc config.MachineConfig, preexisting bool, isUpgrade bool) {
|
||||||
if isUpgrade || !preexisting {
|
if isUpgrade || !preexisting {
|
||||||
out.T(out.Pulling, "Pulling images ...")
|
out.T(out.Pulling, "Pulling images ...")
|
||||||
if err := bs.PullImages(kc); err != nil {
|
if err := bs.PullImages(mc.KubernetesConfig); err != nil {
|
||||||
out.T(out.FailureType, "Unable to pull images, which may be OK: {{.error}}", out.V{"error": err})
|
out.T(out.FailureType, "Unable to pull images, which may be OK: {{.error}}", out.V{"error": err})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out.T(out.Launch, "Launching Kubernetes ... ")
|
out.T(out.Launch, "Launching Kubernetes ... ")
|
||||||
if err := bs.StartCluster(kc); err != nil {
|
if err := bs.StartCluster(mc); err != nil {
|
||||||
exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(r, bs, runner))
|
exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(r, bs, runner))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1320,6 +1340,16 @@ func configureMounts() {
|
||||||
}
|
}
|
||||||
|
|
||||||
// saveConfig saves profile cluster configuration in $MINIKUBE_HOME/profiles/<profilename>/config.json
|
// saveConfig saves profile cluster configuration in $MINIKUBE_HOME/profiles/<profilename>/config.json
|
||||||
func saveConfig(clusterCfg *cfg.MachineConfig) error {
|
func saveConfig(clusterCfg *config.MachineConfig) error {
|
||||||
return cfg.CreateProfile(viper.GetString(cfg.MachineProfile), clusterCfg)
|
return config.SaveProfile(viper.GetString(config.MachineProfile), clusterCfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func saveNodeToConfig(cfg *config.MachineConfig, node *config.Node) error {
|
||||||
|
for i, n := range cfg.Nodes {
|
||||||
|
if n.Name == node.Name {
|
||||||
|
cfg.Nodes[i] = *node
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return saveConfig(cfg)
|
||||||
}
|
}
|
||||||
|
|
|
@ -121,7 +121,7 @@ func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
|
||||||
if err := os.Setenv("HTTP_PROXY", test.proxy); err != nil {
|
if err := os.Setenv("HTTP_PROXY", test.proxy); err != nil {
|
||||||
t.Fatalf("Unexpected error setting HTTP_PROXY: %v", err)
|
t.Fatalf("Unexpected error setting HTTP_PROXY: %v", err)
|
||||||
}
|
}
|
||||||
config, err := generateCfgFromFlags(cmd, k8sVersion, "none")
|
config, _, err := generateCfgFromFlags(cmd, k8sVersion, "none")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Got unexpected error %v during config generation", err)
|
t.Fatalf("Got unexpected error %v during config generation", err)
|
||||||
}
|
}
|
||||||
|
|
|
@@ -19,15 +19,18 @@ package cmd
 import (
 "encoding/json"
 "fmt"
+"io"
 "os"
 "strings"
 "text/template"

+"github.com/docker/machine/libmachine"
 "github.com/docker/machine/libmachine/state"
 "github.com/golang/glog"
+"github.com/pkg/errors"
 "github.com/spf13/cobra"
 "github.com/spf13/viper"
-cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
+"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
 "k8s.io/minikube/pkg/minikube/cluster"
 "k8s.io/minikube/pkg/minikube/config"
 "k8s.io/minikube/pkg/minikube/constants"
@@ -40,16 +43,15 @@ import (
 var statusFormat string
 var output string

-// KubeconfigStatus represents the kubeconfig status
+const (
-var KubeconfigStatus = struct {
+// Additional states used by kubeconfig
-Configured string
+Configured = "Configured" // ~state.Saved
-Misconfigured string
+Misconfigured = "Misconfigured" // ~state.Error
-}{
+// Additional states used for clarity
-Configured: `Configured`,
+Nonexistent = "Nonexistent" // ~state.None
-Misconfigured: `Misconfigured`,
+)
-}

-// Status represents the status
+// Status holds string representations of component states
 type Status struct {
 Host string
 Kubelet string
@@ -81,7 +83,6 @@ var statusCmd = &cobra.Command{
 exit.UsageT("Cannot use both --output and --format options")
 }

-var returnCode = 0
 api, err := machine.NewAPIClient()
 if err != nil {
 exit.WithCodeT(exit.Unavailable, "Error getting client: {{.error}}", out.V{"error": err})
@@ -89,81 +90,128 @@ var statusCmd = &cobra.Command{
 defer api.Close()

 machineName := viper.GetString(config.MachineProfile)
+st, err := status(api, machineName)
-hostSt, err := cluster.GetHostStatus(api, machineName)
 if err != nil {
-exit.WithError("Error getting host status", err)
+glog.Errorf("status error: %v", err)
 }
+if st.Host == Nonexistent {
-kubeletSt := state.None.String()
+glog.Errorf("The %q cluster does not exist!", machineName)
-kubeconfigSt := state.None.String()
-apiserverSt := state.None.String()

-if hostSt == state.Running.String() {
-clusterBootstrapper, err := getClusterBootstrapper(api, viper.GetString(cmdcfg.Bootstrapper))
-if err != nil {
-exit.WithError("Error getting bootstrapper", err)
-}
-kubeletSt, err = clusterBootstrapper.GetKubeletStatus()
-if err != nil {
-glog.Warningf("kubelet err: %v", err)
-returnCode |= clusterNotRunningStatusFlag
-} else if kubeletSt != state.Running.String() {
-returnCode |= clusterNotRunningStatusFlag
-}

-ip, err := cluster.GetHostDriverIP(api, machineName)
-if err != nil {
-glog.Errorln("Error host driver ip status:", err)
-}

-apiserverPort, err := kubeconfig.Port(machineName)
-if err != nil {
-// Fallback to presuming default apiserver port
-apiserverPort = constants.APIServerPort
-}

-apiserverSt, err = clusterBootstrapper.GetAPIServerStatus(ip, apiserverPort)
-if err != nil {
-glog.Errorln("Error apiserver status:", err)
-} else if apiserverSt != state.Running.String() {
-returnCode |= clusterNotRunningStatusFlag
-}

-ks, err := kubeconfig.IsClusterInConfig(ip, machineName)
-if err != nil {
-glog.Errorln("Error kubeconfig status:", err)
-}
-if ks {
-kubeconfigSt = KubeconfigStatus.Configured
-} else {
-kubeconfigSt = KubeconfigStatus.Misconfigured
-returnCode |= k8sNotRunningStatusFlag
-}
-} else {
-returnCode |= minikubeNotRunningStatusFlag
-}

-status := Status{
-Host: hostSt,
-Kubelet: kubeletSt,
-APIServer: apiserverSt,
-Kubeconfig: kubeconfigSt,
 }

 switch strings.ToLower(output) {
 case "text":
-printStatusText(status)
+if err := statusText(st, os.Stdout); err != nil {
+exit.WithError("status text failure", err)
+}
 case "json":
-printStatusJSON(status)
+if err := statusJSON(st, os.Stdout); err != nil {
+exit.WithError("status json failure", err)
+}
 default:
 exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output))
 }

-os.Exit(returnCode)
+os.Exit(exitCode(st))
 },
 }

+func exitCode(st *Status) int {
+c := 0
+if st.Host != state.Running.String() {
+c |= minikubeNotRunningStatusFlag
+}
+if st.APIServer != state.Running.String() || st.Kubelet != state.Running.String() {
+c |= clusterNotRunningStatusFlag
+}
+if st.Kubeconfig != Configured {
+c |= k8sNotRunningStatusFlag
+}
+return c
+}
+
+func status(api libmachine.API, name string) (*Status, error) {
+st := &Status{
+Host: Nonexistent,
+APIServer: Nonexistent,
+Kubelet: Nonexistent,
+Kubeconfig: Nonexistent,
+}
+
+hs, err := cluster.GetHostStatus(api, name)
+glog.Infof("%s host status = %q (err=%v)", name, hs, err)
+if err != nil {
+return st, errors.Wrap(err, "host")
+}
+
+// We have no record of this host. Return nonexistent struct
+if hs == state.None.String() {
+return st, nil
+}
+st.Host = hs
+
+// If it's not running, quickly bail out rather than delivering conflicting messages
+if st.Host != state.Running.String() {
+glog.Infof("host is not running, skipping remaining checks")
+st.APIServer = st.Host
+st.Kubelet = st.Host
+st.Kubeconfig = st.Host
+return st, nil
+}
+
+// We have a fully operational host, now we can check for details
+ip, err := cluster.GetHostDriverIP(api, name)
+if err != nil {
+glog.Errorln("Error host driver ip status:", err)
+st.APIServer = state.Error.String()
+return st, err
+}
+
+port, err := kubeconfig.Port(name)
+if err != nil {
+glog.Warningf("unable to get port: %v", err)
+port = constants.APIServerPort
+}
+
+st.Kubeconfig = Misconfigured
+ok, err := kubeconfig.IsClusterInConfig(ip, name)
+glog.Infof("%s is in kubeconfig at ip %s: %v (err=%v)", name, ip, ok, err)
+if ok {
+st.Kubeconfig = Configured
+}
+
+host, err := cluster.CheckIfHostExistsAndLoad(api, name)
+if err != nil {
+return st, err
+}
+
+cr, err := machine.CommandRunner(host)
+if err != nil {
+return st, err
+}
+
+stk, err := kverify.KubeletStatus(cr)
+glog.Infof("%s kubelet status = %s (err=%v)", name, stk, err)
+
+if err != nil {
+glog.Warningf("kubelet err: %v", err)
+st.Kubelet = state.Error.String()
+} else {
+st.Kubelet = stk.String()
+}
+
+sta, err := kverify.APIServerStatus(cr, ip, port)
+glog.Infof("%s apiserver status = %s (err=%v)", name, stk, err)
+
+if err != nil {
+glog.Errorln("Error apiserver status:", err)
+st.APIServer = state.Error.String()
+} else {
+st.APIServer = sta.String()
+}
+
+return st, nil
+}

 func init() {
 statusCmd.Flags().StringVarP(&statusFormat, "format", "f", defaultStatusFormat,
 `Go template format string for the status output. The format for Go templates can be found here: https://golang.org/pkg/text/template/
@@ -172,25 +220,26 @@ For the list accessible variables for the template, see the struct values here:
 `minikube status --output OUTPUT. json, text`)
 }

-var printStatusText = func(status Status) {
+func statusText(st *Status, w io.Writer) error {
 tmpl, err := template.New("status").Parse(statusFormat)
 if err != nil {
-exit.WithError("Error creating status template", err)
+return err
 }
-err = tmpl.Execute(os.Stdout, status)
+if err := tmpl.Execute(w, st); err != nil {
-if err != nil {
+return err
-exit.WithError("Error executing status template", err)
 }
-if status.Kubeconfig == KubeconfigStatus.Misconfigured {
+if st.Kubeconfig == Misconfigured {
-out.WarningT("Warning: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`")
+_, err := w.Write([]byte("\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n"))
+return err
 }
+return nil
 }

-var printStatusJSON = func(status Status) {
+func statusJSON(st *Status, w io.Writer) error {
+js, err := json.Marshal(st)
-jsonString, err := json.Marshal(status)
 if err != nil {
-exit.WithError("Error converting status to json", err)
+return err
 }
-out.String(string(jsonString))
+_, err = w.Write(js)
+return err
 }
@@ -0,0 +1,107 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+"bytes"
+"encoding/json"
+"testing"
+)
+
+func TestExitCode(t *testing.T) {
+var tests = []struct {
+name string
+want int
+state *Status
+}{
+{"ok", 0, &Status{Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: Configured}},
+{"paused", 2, &Status{Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured}},
+{"down", 7, &Status{Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured}},
+{"missing", 7, &Status{Host: "Nonexistent", Kubelet: "Nonexistent", APIServer: "Nonexistent", Kubeconfig: "Nonexistent"}},
+}
+for _, tc := range tests {
+t.Run(tc.name, func(t *testing.T) {
+got := exitCode(tc.state)
+if got != tc.want {
+t.Errorf("exitcode(%+v) = %d, want: %d", tc.state, got, tc.want)
+}
+})
+}
+}
+
+func TestStatusText(t *testing.T) {
+var tests = []struct {
+name string
+state *Status
+want string
+}{
+{
+name: "ok",
+state: &Status{Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: Configured},
+want: "host: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n",
+},
+{
+name: "paused",
+state: &Status{Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured},
+want: "host: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n",
+},
+{
+name: "down",
+state: &Status{Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured},
+want: "host: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n",
+},
+}
+for _, tc := range tests {
+t.Run(tc.name, func(t *testing.T) {
+var b bytes.Buffer
+err := statusText(tc.state, &b)
+if err != nil {
+t.Errorf("text(%+v) error: %v", tc.state, err)
+}
+
+got := b.String()
+if got != tc.want {
+t.Errorf("text(%+v) = %q, want: %q", tc.state, got, tc.want)
+}
+})
+}
+}
+
+func TestStatusJSON(t *testing.T) {
+var tests = []struct {
+name string
+state *Status
+}{
+{"ok", &Status{Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: Configured}},
+{"paused", &Status{Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured}},
+{"down", &Status{Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured}},
+}
+for _, tc := range tests {
+t.Run(tc.name, func(t *testing.T) {
+var b bytes.Buffer
+err := statusJSON(tc.state, &b)
+if err != nil {
+t.Errorf("json(%+v) error: %v", tc.state, err)
+}
+
+st := &Status{}
+if err := json.Unmarshal(b.Bytes(), st); err != nil {
+t.Errorf("json(%+v) unmarshal error: %v", tc.state, err)
+}
+})
+}
+}
@@ -20,6 +20,7 @@ import (
 "time"

 "github.com/docker/machine/libmachine/mcnerror"
+"github.com/golang/glog"
 "github.com/pkg/errors"
 "github.com/spf13/cobra"
 "github.com/spf13/viper"
@@ -54,6 +55,11 @@ func runStop(cmd *cobra.Command, args []string) {
 nonexistent := false
 stop := func() (err error) {
 err = cluster.StopHost(api)
+if err == nil {
+return nil
+}
+glog.Warningf("stop host returned error: %v", err)
+
 switch err := errors.Cause(err).(type) {
 case mcnerror.ErrHostDoesNotExist:
 out.T(out.Meh, `"{{.profile_name}}" VM does not exist, nothing to stop`, out.V{"profile_name": profile})
@@ -0,0 +1,98 @@
+/*
+Copyright 2020 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+"os"
+"strings"
+
+"github.com/golang/glog"
+"github.com/spf13/cobra"
+"github.com/spf13/viper"
+
+"k8s.io/minikube/pkg/minikube/cluster"
+"k8s.io/minikube/pkg/minikube/config"
+"k8s.io/minikube/pkg/minikube/cruntime"
+"k8s.io/minikube/pkg/minikube/exit"
+"k8s.io/minikube/pkg/minikube/machine"
+"k8s.io/minikube/pkg/minikube/out"
+)
+
+// unpauseCmd represents the docker-pause command
+var unpauseCmd = &cobra.Command{
+Use: "unpause",
+Short: "unpause Kubernetes",
+Run: func(cmd *cobra.Command, args []string) {
+cname := viper.GetString(config.MachineProfile)
+api, err := machine.NewAPIClient()
+if err != nil {
+exit.WithError("Error getting client", err)
+}
+defer api.Close()
+cc, err := config.Load(cname)
+
+if err != nil && !config.IsNotExist(err) {
+exit.WithError("Error loading profile config", err)
+}
+
+if err != nil {
+out.ErrT(out.Meh, `"{{.name}}" profile does not exist`, out.V{"name": cname})
+os.Exit(1)
+}
+glog.Infof("config: %+v", cc)
+host, err := cluster.CheckIfHostExistsAndLoad(api, cname)
+if err != nil {
+exit.WithError("Error getting host", err)
+}
+
+r, err := machine.CommandRunner(host)
+if err != nil {
+exit.WithError("Failed to get command runner", err)
+}
+
+cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: r})
+if err != nil {
+exit.WithError("Failed runtime", err)
+}
+
+glog.Infof("namespaces: %v keys: %v", namespaces, viper.AllSettings())
+if allNamespaces {
+namespaces = nil //all
+} else {
+if len(namespaces) == 0 {
+exit.WithCodeT(exit.BadUsage, "Use -A to specify all namespaces")
+}
+}
+
+ids, err := cluster.Unpause(cr, r, namespaces)
+if err != nil {
+exit.WithError("Pause", err)
+}
+
+if namespaces == nil {
+out.T(out.Pause, "Unpaused kubelet and {{.count}} containers", out.V{"count": len(ids)})
+} else {
+out.T(out.Pause, "Unpaused kubelet and {{.count}} containers in: {{.namespaces}}", out.V{"count": len(ids), "namespaces": strings.Join(namespaces, ", ")})
+}
+
+},
+}
+
+func init() {
+unpauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", cluster.DefaultNamespaces, "namespaces to unpause")
+unpauseCmd.Flags().BoolVarP(&allNamespaces, "all-namespaces", "A", false, "If set, unpause all namespaces")
+}
@@ -17,20 +17,26 @@ limitations under the License.
 package cmd

 import (
+"context"
 "errors"
 "fmt"
 "os"

 "github.com/spf13/cobra"
+"k8s.io/minikube/pkg/minikube/perf"
 )

 var rootCmd = &cobra.Command{
 Use: "mkcmp [path to first binary] [path to second binary]",
 Short: "mkcmp is used to compare performance of two minikube binaries",
+SilenceUsage: true,
+SilenceErrors: true,
 PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
 return validateArgs(args)
 },
-Run: func(cmd *cobra.Command, args []string) {},
+RunE: func(cmd *cobra.Command, args []string) error {
+return perf.CompareMinikubeStart(context.Background(), os.Stdout, args)
+},
 }

 func validateArgs(args []string) error {
@@ -43,7 +49,7 @@ func validateArgs(args []string) error {
 // Execute runs the mkcmp command
 func Execute() {
 if err := rootCmd.Execute(); err != nil {
-fmt.Println(err)
+fmt.Println("Error:", err)
 os.Exit(1)
 }
 }
@@ -1,56 +0,0 @@
-# Copyright 2016 The Kubernetes Authors All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Pod
-metadata:
-name: kube-addon-manager
-namespace: kube-system
-labels:
-component: kube-addon-manager
-version: v9.0.2
-kubernetes.io/minikube-addons: addon-manager
-spec:
-hostNetwork: true
-containers:
-- name: kube-addon-manager
-image: {{default "k8s.gcr.io" .ImageRepository}}/kube-addon-manager{{.ExoticArch}}:v9.0.2
-env:
-- name: KUBECONFIG
-value: /var/lib/minikube/kubeconfig
-- name: TEST_ADDON_CHECK_INTERVAL_SEC
-value: "5"
-- name: ADDON_MANAGER_LEADER_ELECTION
-value: "false"
-- name: KUBECTL_EXTRA_PRUNE_WHITELIST
-value: install.istio.io/v1alpha2/IstioControlPlane
-imagePullPolicy: IfNotPresent
-resources:
-requests:
-cpu: 5m
-memory: 50Mi
-volumeMounts:
-- mountPath: /etc/kubernetes/
-name: addons
-readOnly: true
-- mountPath: /var/lib/minikube/
-name: kubeconfig
-readOnly: true
-volumes:
-- hostPath:
-path: /etc/kubernetes/
-name: addons
-- hostPath:
-path: /var/lib/minikube/
-name: kubeconfig
@@ -18,7 +18,7 @@ spec:
 addonmanager.kubernetes.io/mode: Reconcile
 spec:
 containers:
-- image: upmcenterprises/registry-creds:1.9
+- image: upmcenterprises/registry-creds:1.10
 name: registry-creds
 imagePullPolicy: Always
 env:
@@ -77,6 +77,21 @@ spec:
 secretKeyRef:
 name: registry-creds-gcr
 key: gcrurl
+- name: ACR_PASSWORD
+valueFrom:
+secretKeyRef:
+name: registry-creds-acr
+key: ACR_PASSWORD
+- name: ACR_URL
+valueFrom:
+secretKeyRef:
+name: registry-creds-acr
+key: ACR_URL
+- name: ACR_CLIENT_ID
+valueFrom:
+secretKeyRef:
+name: registry-creds-acr
+key: ACR_CLIENT_ID
 volumeMounts:
 - name: gcr-creds
 mountPath: "/root/.config/gcloud"
@@ -19,6 +19,8 @@ BR2_ROOTFS_USERS_TABLES="$(BR2_EXTERNAL_MINIKUBE_PATH)/board/coreos/minikube/use
 BR2_ROOTFS_OVERLAY="$(BR2_EXTERNAL_MINIKUBE_PATH)/board/coreos/minikube/rootfs-overlay"
 BR2_GLOBAL_PATCH_DIR="$(BR2_EXTERNAL_MINIKUBE_PATH)/board/coreos/minikube/patches"
 BR2_LINUX_KERNEL=y
+BR2_LINUX_KERNEL_BZIMAGE=y
+BR2_LINUX_KERNEL_LZ4=y
 BR2_LINUX_KERNEL_USE_CUSTOM_CONFIG=y
 BR2_LINUX_KERNEL_CUSTOM_CONFIG_FILE="$(BR2_EXTERNAL_MINIKUBE_PATH)/board/coreos/minikube/linux_defconfig"
 BR2_LINUX_KERNEL_NEEDS_HOST_LIBELF=y
@@ -55,7 +57,8 @@ BR2_PACKAGE_SYSTEMD_MACHINED=y
 BR2_PACKAGE_SYSTEMD_VCONSOLE=y
 BR2_PACKAGE_UTIL_LINUX_NSENTER=y
 BR2_PACKAGE_UTIL_LINUX_SCHEDUTILS=y
-BR2_TARGET_ROOTFS_CPIO_BZIP2=y
+BR2_TARGET_ROOTFS_CPIO=y
+BR2_TARGET_ROOTFS_CPIO_GZIP=y
 BR2_TARGET_ROOTFS_ISO9660=y
 BR2_TARGET_ROOTFS_ISO9660_BOOT_MENU="$(BR2_EXTERNAL_MINIKUBE_PATH)/board/coreos/minikube/isolinux.cfg"
 BR2_TARGET_SYSLINUX=y
@@ -1,6 +1,7 @@
 menu "System tools"
 source "$BR2_EXTERNAL_MINIKUBE_PATH/package/runc-master/Config.in"
 source "$BR2_EXTERNAL_MINIKUBE_PATH/package/podman/Config.in"
+source "$BR2_EXTERNAL_MINIKUBE_PATH/package/varlink/Config.in"
 source "$BR2_EXTERNAL_MINIKUBE_PATH/package/conmon-master/Config.in"
 source "$BR2_EXTERNAL_MINIKUBE_PATH/package/crio-bin/Config.in"
 source "$BR2_EXTERNAL_MINIKUBE_PATH/package/crictl-bin/Config.in"
@@ -29,7 +29,7 @@ if [ ! -n "$BOOT2DOCKER_DATA" ]; then
 # Let kernel re-read partition table
 partprobe
 # Add the data partition
-(echo n; echo p; echo 1; echo ; echo ; echo w) | fdisk $UNPARTITIONED_HD
+(echo g; echo n; echo 1; echo ; echo ; echo w) | fdisk $UNPARTITIONED_HD
 # Let kernel re-read partition table
 partprobe
 # wait for the partition to actually exist, timeout after about 5 seconds
@@ -57,7 +57,7 @@ if [ ! -n "$BOOT2DOCKER_DATA" ]; then
 if [ $NON_NUL == 0 ]; then
 # Create the partition, format it and then mount it
 echo "NEW VMware boot2docker managed disk image ($UNPARTITIONED_HD): formatting it for use"
-(echo n; echo p; echo 1; echo ; echo ; echo w) | fdisk $UNPARTITIONED_HD
+(echo g; echo n; echo 1; echo ; echo ; echo w) | fdisk $UNPARTITIONED_HD
 BOOT2DOCKER_DATA=`echo "${UNPARTITIONED_HD}1"`
 mkfs.ext4 -i 2048 -L $LABEL $BOOT2DOCKER_DATA
 else
@@ -0,0 +1,3 @@
+config BR2_PACKAGE_VARLINK
+bool "varlink"
+default y

@@ -0,0 +1,3 @@
+sha256 3857f109574750403b233b5fdf73f1852d8decc33dac8f73bd49f2003b69ad22 16.tar.gz
+sha256 0dcb451f32033154c56710c216e67f245923fe2b011321271f6670e5a2285ce6 17.tar.gz
+sha256 7a32543643116ad105da4ddb2f8030de7dcad1cdb3feb1a214ae5e7b65a6a198 18.tar.gz

@@ -0,0 +1,7 @@
+VARLINK_VERSION = 18
+VARLINK_SITE = https://github.com/varlink/libvarlink/archive
+VARLINK_SOURCE = $(VARLINK_VERSION).tar.gz
+VARLINK_LICENSE = Apache-2.0
+VARLINK_LICENSE_FILES = LICENSE
+
+$(eval $(meson-package))
@@ -1,4 +1,12 @@
 [
+{
+"name": "v1.7.0",
+"checksums": {
+"darwin": "1f3785e9521eabe241df0481fa41887a6a3873307bac8a89fd0e48aa7612be29",
+"linux": "f1fcab9f161a64f19b618a901e50488ed6f1c6ab20695c82623586a701d2d261",
+"windows": "2f4448f32e505bf38ba52cd3678c73622a6bb452c63a4179d590f6da26520c68"
+}
+},
 {
 "name": "v1.6.2",
 "checksums": {
@@ -13,5 +13,6 @@
 # limitations under the License.

 FROM scratch
-COPY out/storage-provisioner /storage-provisioner
-CMD ["/storage-provisioner"]
+ARG arch
+COPY out/storage-provisioner-${arch} /storage-provisioner
+CMD ["/storage-provisioner"]

@@ -1,17 +0,0 @@
-# Copyright 2016 The Kubernetes Authors All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM s390x/ubuntu:16.04
-COPY out/storage-provisioner storage-provisioner
-CMD ["/storage-provisioner"]
go.mod
@@ -2,10 +2,6 @@ module k8s.io/minikube

 go 1.13
-
-require github.com/google/go-containerregistry v0.0.0-20180731221751-697ee0b3d46e
-
-require k8s.io/kubernetes v1.15.2

 require (
 github.com/Parallels/docker-machine-parallels v1.3.0
 github.com/Sirupsen/logrus v0.0.0-20170822132746-89742aefa4b2 // indirect
@@ -17,7 +13,7 @@ require (
 github.com/cloudfoundry-attic/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21
 github.com/cloudfoundry/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21 // indirect
 github.com/docker/docker v1.13.1
-github.com/docker/go-units v0.3.3
+github.com/docker/go-units v0.4.0
 github.com/docker/machine v0.7.1-0.20190718054102-a555e4f7a8f5 // version is 0.7.1 to pin to a555e4f7a8f5
 github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f
 github.com/elazarl/goproxy/ext v0.0.0-20190421051319-9d40249d3c2f // indirect
@@ -26,14 +22,16 @@ require (
 github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
 github.com/google/go-cmp v0.3.0
+github.com/google/go-containerregistry v0.0.0-20180731221751-697ee0b3d46e
 github.com/googleapis/gnostic v0.3.0 // indirect
-github.com/gorilla/mux v1.7.1 // indirect
+github.com/gorilla/mux v1.7.3 // indirect
 github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce // indirect
 github.com/hashicorp/go-getter v1.4.0
 github.com/hashicorp/go-multierror v0.0.0-20160811015721-8c5f0ad93604 // indirect
 github.com/hashicorp/go-retryablehttp v0.5.4
 github.com/hooklift/assert v0.0.0-20170704181755-9d1defd6d214 // indirect
 github.com/hooklift/iso9660 v0.0.0-20170318115843-1cf07e5970d8
+github.com/imdario/mergo v0.3.8 // indirect
 github.com/intel-go/cpuid v0.0.0-20181003105527-1a4a6f06a1c6 // indirect
 github.com/jimmidyson/go-download v0.0.0-20161028105827-7f9a90c8c95b
 github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345
@@ -48,10 +46,10 @@ require (
 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
 github.com/libvirt/libvirt-go v3.4.0+incompatible
 github.com/machine-drivers/docker-machine-driver-vmware v0.1.1
-github.com/mattn/go-isatty v0.0.8
+github.com/mattn/go-isatty v0.0.9
 github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936
 github.com/moby/hyperkit v0.0.0-20171020124204-a12cd7250bcd
-github.com/olekukonko/tablewriter v0.0.0-20160923125401-bdcc175572fd
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5
 github.com/otiai10/copy v1.0.2
 github.com/pborman/uuid v1.2.0
 github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2
@@ -61,30 +59,25 @@ require (
 github.com/pmezard/go-difflib v1.0.0
 github.com/samalba/dockerclient v0.0.0-20160414174713-91d7393ff859 // indirect
 github.com/shirou/gopsutil v2.18.12+incompatible
-github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 // indirect
-github.com/sirupsen/logrus v1.4.1 // indirect
 github.com/spf13/cobra v0.0.5
-github.com/spf13/pflag v1.0.3
+github.com/spf13/pflag v1.0.5
 github.com/spf13/viper v1.3.2
 github.com/xeipuuv/gojsonpointer v0.0.0-20151027082146-e0fe6f683076 // indirect
 github.com/xeipuuv/gojsonreference v0.0.0-20150808065054-e02fc20de94c // indirect
 github.com/xeipuuv/gojsonschema v0.0.0-20160623135812-c539bca196be
 github.com/zchee/go-vmnet v0.0.0-20161021174912-97ebf9174097
-golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d
+golang.org/x/build v0.0.0-20190927031335-2835ba2e683f
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586
-golang.org/x/sync v0.0.0-20190423024810-112230192c58
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
-golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456
 golang.org/x/text v0.3.2
-gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
 gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 // indirect
-gotest.tools v2.2.0+incompatible
+k8s.io/api v0.17.2
-k8s.io/api v0.0.0
+k8s.io/apimachinery v0.17.2
-k8s.io/apimachinery v0.0.0
 k8s.io/client-go v11.0.0+incompatible
-k8s.io/klog v0.3.3 // indirect
+k8s.io/kubectl v0.0.0
-k8s.io/kubectl v0.0.0-00010101000000-000000000000
+k8s.io/kubernetes v1.17.2
+k8s.io/utils v0.0.0-20200122174043-1e243dd1a584 // indirect
 sigs.k8s.io/sig-storage-lib-external-provisioner v4.0.0+incompatible
 )

@@ -92,28 +85,28 @@ replace (
 git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110319-2566ecd5d999
 github.com/docker/machine => github.com/machine-drivers/machine v0.7.1-0.20191109154235-b39d5b50de51
 github.com/hashicorp/go-getter => github.com/afbjorklund/go-getter v1.4.1-0.20190910175809-eb9f6c26742c
-k8s.io/api => k8s.io/kubernetes/staging/src/k8s.io/api v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/api => k8s.io/api v0.17.2
-k8s.io/apiextensions-apiserver => k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.17.2
-k8s.io/apimachinery => k8s.io/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/apimachinery => k8s.io/apimachinery v0.17.2
-k8s.io/apiserver => k8s.io/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/apiserver => k8s.io/apiserver v0.17.2
-k8s.io/cli-runtime => k8s.io/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/cli-runtime => k8s.io/cli-runtime v0.17.2
-k8s.io/client-go => k8s.io/kubernetes/staging/src/k8s.io/client-go v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/client-go => k8s.io/client-go v0.17.2
-k8s.io/cloud-provider => k8s.io/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/cloud-provider => k8s.io/cloud-provider v0.17.2
-k8s.io/cluster-bootstrap => k8s.io/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.17.2
-k8s.io/code-generator => k8s.io/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/code-generator => k8s.io/code-generator v0.17.2
-k8s.io/component-base => k8s.io/kubernetes/staging/src/k8s.io/component-base v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/component-base => k8s.io/component-base v0.17.2
-k8s.io/cri-api => k8s.io/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/cri-api => k8s.io/cri-api v0.17.2
-k8s.io/csi-translation-lib => k8s.io/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.17.2
-k8s.io/kube-aggregator => k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.17.2
-k8s.io/kube-controller-manager => k8s.io/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.17.2
-k8s.io/kube-proxy => k8s.io/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/kube-proxy => k8s.io/kube-proxy v0.17.2
-k8s.io/kube-scheduler => k8s.io/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.17.2
-k8s.io/kubectl => k8s.io/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/kubectl => k8s.io/kubectl v0.17.2
-k8s.io/kubelet => k8s.io/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/kubelet => k8s.io/kubelet v0.17.2
-k8s.io/legacy-cloud-providers => k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.17.2
-k8s.io/metrics => k8s.io/kubernetes/staging/src/k8s.io/metrics v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/metrics => k8s.io/metrics v0.17.2
-k8s.io/node-api => k8s.io/kubernetes/staging/src/k8s.io/node-api v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/node-api => k8s.io/node-api v0.17.2
-k8s.io/sample-apiserver => k8s.io/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.17.2
-k8s.io/sample-cli-plugin => k8s.io/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.17.2
-k8s.io/sample-controller => k8s.io/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20190623232353-8c3b7d7679cc
+k8s.io/sample-controller => k8s.io/sample-controller v0.17.2
 )
@@ -1,10 +1,38 @@
 ARG COMMIT_SHA
-FROM kindest/node:v1.16.2
+# using base image created by kind https://github.com/kubernetes-sigs/kind
+# which is an ubuntu 19.10 with an entry-point that helps running systemd
+# could be changed to any debian that can run systemd
+FROM kindest/base:v20200122-2dfe64b2
 USER root
 RUN apt-get update && apt-get install -y \
 sudo \
 dnsutils \
+openssh-server \
+docker.io \
 && apt-get clean -y
+# disable containerd by default
+RUN systemctl disable containerd
+RUN rm /etc/crictl.yaml
+# enable docker which is default
+RUN systemctl enable docker
+# making SSH work for docker container
+# based on https://github.com/rastasheep/ubuntu-sshd/blob/master/18.04/Dockerfile
+RUN mkdir /var/run/sshd
+RUN echo 'root:root' |chpasswd
+RUN sed -ri 's/^#?PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config
+RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config
+EXPOSE 22
+# for minikube ssh. to match VM using "docker" as username
+RUN adduser --ingroup docker --disabled-password --gecos '' docker
+RUN adduser docker sudo
+RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
+USER docker
+RUN mkdir /home/docker/.ssh
+# Deleting leftovers
+USER root
+# kind base-image entry-point expects a "kind" folder for product_name,product_uuid
+# https://github.com/kubernetes-sigs/kind/blob/master/images/base/files/usr/local/bin/entrypoint
+RUN mkdir -p /kind
 RUN rm -rf \
 /var/cache/debconf/* \
 /var/lib/apt/lists/* \
@@ -14,5 +42,4 @@ RUN rm -rf \
 /usr/share/doc/* \
 /usr/share/man/* \
 /usr/share/local/* \
-/kind/bin/kubeadm /kind/bin/kubelet /kind/systemd /kind/images /kind/manifests
 RUN echo "kic! Build: ${COMMIT_SHA} Time :$(date)" > "/kic.txt"
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Copyright 2019 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# This script runs the integration tests on a Linux machine for the KVM Driver
+
+# The script expects the following env variables:
+# MINIKUBE_LOCATION: GIT_COMMIT from upstream build.
+# COMMIT: Actual commit ID from upstream build
+# EXTRA_BUILD_ARGS (optional): Extra args to be passed into the minikube integrations tests
+# access_token: The Github API access token. Injected by the Jenkins credential provider.
+
+set -e
+
+OS_ARCH="linux-amd64"
+VM_DRIVER="docker"
+JOB_NAME="Docker_Linux"
+
+mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
+sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP"
+
+source ./common.sh
@@ -40,6 +40,7 @@ jobs=(
     # 'KVM-GPU_Linux' - Disabled
     'KVM_Linux'
     'none_Linux'
+    'Docker_Linux'
 )

 # retry_github_status provides reliable github status updates
@@ -35,18 +35,18 @@ grep -E "^VERSION_MAJOR \\?=" Makefile | grep "${VERSION_MAJOR}"
 grep -E "^VERSION_MINOR \\?=" Makefile | grep "${VERSION_MINOR}"
 grep -E "^VERSION_BUILD \\?=" Makefile | grep "${VERSION_BUILD}"

-# Diagnostics
-go env GOPATH
+# Force go packages to the Jenkins home directory
+export GOPATH=$HOME/go

 # Build and upload
-env GOPATH=$HOME/go BUILD_IN_DOCKER=y \
+env BUILD_IN_DOCKER=y \
   make -j 16 \
   all \
   out/minikube-installer.exe \
-  "out/minikube_${DEB_VERSION}.deb" \
-  "out/minikube-${RPM_VERSION}.rpm" \
-  "out/docker-machine-driver-kvm2_${DEB_VERSION}.deb" \
-  "out/docker-machine-driver-kvm2-${RPM_VERSION}.rpm"
+  "out/minikube_${DEB_VERSION}-0_amd64.deb" \
+  "out/minikube-${RPM_VERSION}-0.x86_64.rpm" \
+  "out/docker-machine-driver-kvm2_${DEB_VERSION}-0_amd64.deb" \
+  "out/docker-machine-driver-kvm2-${RPM_VERSION}-0.x86_64.rpm"

 make checksum
@@ -61,14 +61,14 @@ See [Getting Started](https://minikube.sigs.k8s.io/docs/start/)

 # ================================================================================
 # Deleting release from github before creating new one
-github-release delete \
+github-release -v delete \
   --user "${GITHUB_ORGANIZATION}" \
   --repo "${GITHUB_REPO}" \
   --tag "${TAGNAME}" \
   || true

 # Creating a new release in github
-github-release release ${RELEASE_FLAGS} \
+github-release -v release ${RELEASE_FLAGS} \
   --user "${GITHUB_ORGANIZATION}" \
   --repo "${GITHUB_REPO}" \
   --tag "${TAGNAME}" \
@@ -84,8 +84,8 @@ FILES_TO_UPLOAD=(
   'minikube-windows-amd64.exe'
   'minikube-windows-amd64.exe.sha256'
   'minikube-installer.exe'
-  "minikube_${DEB_VERSION}.deb"
-  "minikube-${RPM_VERSION}.rpm"
+  "minikube_${DEB_VERSION}-0_amd64.deb"
+  "minikube-${RPM_VERSION}-0.x86_64.rpm"
   'docker-machine-driver-kvm2'
   'docker-machine-driver-kvm2.sha256'
   'docker-machine-driver-hyperkit'
@@ -106,7 +106,7 @@ do
   n=0
   until [ $n -ge 5 ]
   do
-    github-release upload \
+    github-release -v upload \
       --user "${GITHUB_ORGANIZATION}" \
       --repo "${GITHUB_REPO}" \
       --tag "${TAGNAME}" \
@@ -2,7 +2,7 @@ Package: docker-machine-driver-kvm2
 Version: --VERSION--
 Section: base
 Priority: optional
-Architecture: amd64
+Architecture: --ARCH--
 Depends: libvirt0 (>= 1.3.1)
 Recommends: minikube
 Maintainer: Thomas Strömberg <t+minikube@stromberg.org>
@@ -18,7 +18,7 @@ a consistent way to manage various VM providers.
 %prep
 mkdir -p %{name}-%{version}
 cd %{name}-%{version}
-cp --OUT--/docker-machine-driver-kvm2 .
+cp --OUT--/docker-machine-driver-kvm2-%{_arch} docker-machine-driver-kvm2

 %install
 cd %{name}-%{version}
@@ -18,9 +18,13 @@ package addons

 import (
 	"fmt"
-	"os"
+	"path"
+	"sort"
 	"strconv"
+	"strings"
+	"time"

+	"github.com/golang/glog"
 	"github.com/pkg/errors"
 	"github.com/spf13/viper"
 	"k8s.io/minikube/pkg/minikube/assets"
@@ -39,6 +43,7 @@ const defaultStorageClassProvisioner = "standard"

 // Set sets a value
 func Set(name, value, profile string) error {
+	glog.Infof("Setting %s=%s in profile %q", name, value, profile)
 	a, valid := isAddonValid(name)
 	if !valid {
 		return errors.Errorf("%s is not a valid addon", name)
@@ -64,7 +69,7 @@ func Set(name, value, profile string) error {
 		return errors.Wrap(err, "running callbacks")
 	}

-	// Write the value
+	glog.Infof("Writing out %q config to set %s=%v...", profile, name, value)
 	return config.Write(profile, c)
 }
@@ -98,6 +103,7 @@ func SetBool(m *config.MachineConfig, name string, val string) error {

 // enableOrDisableAddon updates addon status executing any commands necessary
 func enableOrDisableAddon(name, val, profile string) error {
+	glog.Infof("Setting addon %s=%s in %q", name, val, profile)
 	enable, err := strconv.ParseBool(val)
 	if err != nil {
 		return errors.Wrapf(err, "parsing bool: %s", name)
@@ -105,14 +111,14 @@ func enableOrDisableAddon(name, val, profile string) error {
 	addon := assets.Addons[name]

 	// check addon status before enabling/disabling it
-	alreadySet, err := isAddonAlreadySet(addon, enable)
+	alreadySet, err := isAddonAlreadySet(addon, enable, profile)
 	if err != nil {
 		out.ErrT(out.Conflict, "{{.error}}", out.V{"error": err})
 		return err
 	}
-	//if addon is already enabled or disabled, do nothing
 	if alreadySet {
-		return nil
+		glog.Warningf("addon %s should already be in state %v", name, val)
 	}

 	if name == "istio" && enable {
@@ -132,20 +138,15 @@ func enableOrDisableAddon(name, val, profile string) error {
 	}
 	defer api.Close()

-	//if minikube is not running, we return and simply update the value in the addon
-	//config and rewrite the file
-	if !cluster.IsMinikubeRunning(api) {
-		return nil
-	}
-
 	cfg, err := config.Load(profile)
-	if err != nil && !os.IsNotExist(err) {
+	if err != nil && !config.IsNotExist(err) {
 		exit.WithCodeT(exit.Data, "Unable to load config: {{.error}}", out.V{"error": err})
 	}

-	host, err := cluster.CheckIfHostExistsAndLoad(api, cfg.Name)
-	if err != nil {
-		return errors.Wrap(err, "getting host")
+	host, err := cluster.CheckIfHostExistsAndLoad(api, profile)
+	if err != nil || !cluster.IsHostRunning(api, profile) {
+		glog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement (err=%v)", profile, addon.Name(), enable, err)
+		return nil
 	}

 	cmd, err := machine.CommandRunner(host)
@@ -154,14 +155,13 @@ func enableOrDisableAddon(name, val, profile string) error {
 	}

 	data := assets.GenerateTemplateData(cfg.KubernetesConfig)
-	return enableOrDisableAddonInternal(addon, cmd, data, enable)
+	return enableOrDisableAddonInternal(addon, cmd, data, enable, profile)
 }

-func isAddonAlreadySet(addon *assets.Addon, enable bool) (bool, error) {
-	addonStatus, err := addon.IsEnabled()
+func isAddonAlreadySet(addon *assets.Addon, enable bool, profile string) (bool, error) {
+	addonStatus, err := addon.IsEnabled(profile)

 	if err != nil {
-		return false, errors.Wrap(err, "get the addon status")
+		return false, errors.Wrap(err, "is enabled")
 	}

 	if addonStatus && enable {
@@ -173,34 +173,53 @@ func isAddonAlreadySet(addon *assets.Addon, enable bool) (bool, error) {
 	return false, nil
 }

-func enableOrDisableAddonInternal(addon *assets.Addon, cmd command.Runner, data interface{}, enable bool) error {
-	var err error
+func enableOrDisableAddonInternal(addon *assets.Addon, cmd command.Runner, data interface{}, enable bool, profile string) error {
+	files := []string{}

-	updateFile := cmd.Copy
-	if !enable {
-		updateFile = cmd.Remove
-	}
-
 	for _, addon := range addon.Assets {
-		var addonFile assets.CopyableFile
+		var f assets.CopyableFile
+		var err error
 		if addon.IsTemplate() {
-			addonFile, err = addon.Evaluate(data)
+			f, err = addon.Evaluate(data)
 			if err != nil {
 				return errors.Wrapf(err, "evaluate bundled addon %s asset", addon.GetAssetName())
 			}

 		} else {
-			addonFile = addon
+			f = addon
 		}
-		if err := updateFile(addonFile); err != nil {
-			return errors.Wrapf(err, "updating addon %s", addon.AssetName)
+		fPath := path.Join(f.GetTargetDir(), f.GetTargetName())
+
+		if enable {
+			glog.Infof("installing %s", fPath)
+			if err := cmd.Copy(f); err != nil {
+				return err
+			}
+		} else {
+			glog.Infof("Removing %+v", fPath)
+			defer func() {
+				if err := cmd.Remove(f); err != nil {
+					glog.Warningf("error removing %s; addon should still be disabled as expected", fPath)
+				}
+			}()
 		}
+		files = append(files, fPath)
 	}
+
+	command, err := kubectlCommand(profile, files, enable)
+	if err != nil {
+		return err
+	}
+	glog.Infof("Running: %s", command)
+	rr, err := cmd.RunCmd(command)
+	if err != nil {
+		return errors.Wrapf(err, "addon apply")
+	}
+	glog.Infof("output:\n%s", rr.Output())
 	return nil
 }

 // enableOrDisableStorageClasses enables or disables storage classes
 func enableOrDisableStorageClasses(name, val, profile string) error {
+	glog.Infof("enableOrDisableStorageClasses %s=%v on %q", name, val, profile)
 	enable, err := strconv.ParseBool(val)
 	if err != nil {
 		return errors.Wrap(err, "Error parsing boolean")
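In the rewritten flow above, each addon asset is copied into (or scheduled for removal from) the machine, and then a single kubectl invocation built by kubectlCommand applies or deletes all of the collected manifests at once, instead of relying on the old per-file copy plus addon-manager behavior. A minimal stand-alone sketch of that "one kubectl call over many files" idea, using only the Go standard library; the file names and the plain "kubectl" binary here are placeholders, not minikube's real paths:

	package main

	import (
		"fmt"
		"os/exec"
	)

	// buildKubectlCmd mirrors the shape of the generated command: one action
	// ("apply" or "delete") followed by a -f flag per manifest file.
	func buildKubectlCmd(kubectl string, files []string, enable bool) *exec.Cmd {
		action := "apply"
		if !enable {
			action = "delete"
		}
		args := []string{action}
		for _, f := range files {
			args = append(args, "-f", f)
		}
		return exec.Command(kubectl, args...)
	}

	func main() {
		cmd := buildKubectlCmd("kubectl", []string{"dashboard-dp.yaml", "dashboard-svc.yaml"}, true)
		fmt.Println(cmd.Args) // [kubectl apply -f dashboard-dp.yaml -f dashboard-svc.yaml]
	}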
@@ -215,6 +234,17 @@ func enableOrDisableStorageClasses(name, val, profile string) error {
 		return errors.Wrapf(err, "Error getting storagev1 interface %v ", err)
 	}

+	api, err := machine.NewAPIClient()
+	if err != nil {
+		return errors.Wrap(err, "machine client")
+	}
+	defer api.Close()
+
+	if !cluster.IsHostRunning(api, profile) {
+		glog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement", profile, name, val)
+		return enableOrDisableAddon(name, val, profile)
+	}
+
 	if enable {
 		// Only StorageClass for 'name' should be marked as default
 		err = storageclass.SetDefaultStorageClass(storagev1, class)
|
||||||
|
|
||||||
return enableOrDisableAddon(name, val, profile)
|
return enableOrDisableAddon(name, val, profile)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Start enables the default addons for a profile, plus any additional
|
||||||
|
func Start(profile string, toEnable map[string]bool, additional []string) {
|
||||||
|
start := time.Now()
|
||||||
|
glog.Infof("enableAddons start: toEnable=%v, additional=%s", toEnable, additional)
|
||||||
|
defer func() {
|
||||||
|
glog.Infof("enableAddons completed in %s", time.Since(start))
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Get the default values of any addons not saved to our config
|
||||||
|
for name, a := range assets.Addons {
|
||||||
|
defaultVal, err := a.IsEnabled(profile)
|
||||||
|
if err != nil {
|
||||||
|
glog.Errorf("is-enabled failed for %q: %v", a.Name(), err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, exists := toEnable[name]
|
||||||
|
if !exists {
|
||||||
|
toEnable[name] = defaultVal
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply new addons
|
||||||
|
for _, name := range additional {
|
||||||
|
toEnable[name] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
toEnableList := []string{}
|
||||||
|
for k, v := range toEnable {
|
||||||
|
if v {
|
||||||
|
toEnableList = append(toEnableList, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sort.Strings(toEnableList)
|
||||||
|
|
||||||
|
out.T(out.AddonEnable, "Enabling addons: {{.addons}}", out.V{"addons": strings.Join(toEnableList, ", ")})
|
||||||
|
for _, a := range toEnableList {
|
||||||
|
err := Set(a, "true", profile)
|
||||||
|
if err != nil {
|
||||||
|
// Intentionally non-fatal
|
||||||
|
out.WarningT("Enabling '{{.name}}' returned an error: {{.error}}", out.V{"name": a, "error": err})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@@ -17,93 +17,115 @@ limitations under the License.
 package addons

 import (
-	"fmt"
+	"io/ioutil"
 	"os"
+	"path/filepath"
 	"testing"

-	"gotest.tools/assert"
 	"k8s.io/minikube/pkg/minikube/assets"
 	"k8s.io/minikube/pkg/minikube/config"
+	"k8s.io/minikube/pkg/minikube/localpath"
 )

+func createTestProfile(t *testing.T) string {
+	t.Helper()
+	td, err := ioutil.TempDir("", "profile")
+	if err != nil {
+		t.Fatalf("tempdir: %v", err)
+	}
+
+	err = os.Setenv(localpath.MinikubeHome, td)
+	if err != nil {
+		t.Errorf("error setting up test environment. could not set %s", localpath.MinikubeHome)
+	}
+
+	// Not necessary, but it is a handy random alphanumeric
+	name := filepath.Base(td)
+	if err := os.MkdirAll(config.ProfileFolderPath(name), 0777); err != nil {
+		t.Fatalf("error creating temporary directory")
+	}
+	if err := config.DefaultLoader.WriteConfigToFile(name, &config.MachineConfig{}); err != nil {
+		t.Fatalf("error creating temporary profile config: %v", err)
+	}
+	return name
+}
+
 func TestIsAddonAlreadySet(t *testing.T) {
-	testCases := []struct {
-		addonName string
-	}{
-		{
-			addonName: "ingress",
-		},
-		{
-			addonName: "registry",
-		},
-	}
-
-	for _, test := range testCases {
-		addon := assets.Addons[test.addonName]
-		addonStatus, _ := addon.IsEnabled()
-
-		alreadySet, err := isAddonAlreadySet(addon, addonStatus)
-		if !alreadySet {
-			if addonStatus {
-				t.Errorf("Did not get expected status, \n\n expected %+v already enabled", test.addonName)
-			} else {
-				t.Errorf("Did not get expected status, \n\n expected %+v already disabled", test.addonName)
-			}
-		}
-		if err != nil {
-			t.Errorf("Got unexpected error: %+v", err)
-		}
-	}
+	profile := createTestProfile(t)
+	if err := Set("registry", "true", profile); err != nil {
+		t.Errorf("unable to set registry true: %v", err)
+	}
+	enabled, err := assets.Addons["registry"].IsEnabled(profile)
+	if err != nil {
+		t.Errorf("registry: %v", err)
+	}
+	if !enabled {
+		t.Errorf("expected registry to be enabled")
+	}
+
+	enabled, err = assets.Addons["ingress"].IsEnabled(profile)
+	if err != nil {
+		t.Errorf("ingress: %v", err)
+	}
+	if enabled {
+		t.Errorf("expected ingress to not be enabled")
+	}
 }

 func TestDisableUnknownAddon(t *testing.T) {
-	tmpProfile := "temp-minikube-profile"
-	if err := Set("InvalidAddon", "false", tmpProfile); err == nil {
+	profile := createTestProfile(t)
+	if err := Set("InvalidAddon", "false", profile); err == nil {
 		t.Fatalf("Disable did not return error for unknown addon")
 	}
 }

 func TestEnableUnknownAddon(t *testing.T) {
-	tmpProfile := "temp-minikube-profile"
-	if err := Set("InvalidAddon", "true", tmpProfile); err == nil {
+	profile := createTestProfile(t)
+	if err := Set("InvalidAddon", "true", profile); err == nil {
 		t.Fatalf("Enable did not return error for unknown addon")
 	}
 }

 func TestEnableAndDisableAddon(t *testing.T) {
-	tests := []struct {
-		name   string
-		enable bool
-	}{
-		{
-			name:   "test enable",
-			enable: true,
-		}, {
-			name:   "test disable",
-			enable: false,
-		},
-	}
-
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			tmpProfile := "temp-minikube-profile"
-			if err := os.MkdirAll(config.ProfileFolderPath(tmpProfile), 0777); err != nil {
-				t.Fatalf("error creating temporary directory")
-			}
-			defer os.RemoveAll(config.ProfileFolderPath(tmpProfile))
-
-			if err := config.DefaultLoader.WriteConfigToFile(tmpProfile, &config.MachineConfig{}); err != nil {
-				t.Fatalf("error creating temporary profile config: %v", err)
-			}
-			if err := Set("dashboard", fmt.Sprintf("%t", test.enable), tmpProfile); err != nil {
-				t.Fatalf("Disable returned unexpected error: " + err.Error())
-			}
-			c, err := config.DefaultLoader.LoadConfigFromFile(tmpProfile)
-			if err != nil {
-				t.Fatalf("error loading config: %v", err)
-			}
-			assert.Equal(t, c.Addons["dashboard"], test.enable)
-		})
-	}
+	profile := createTestProfile(t)
+
+	// enable
+	if err := Set("dashboard", "true", profile); err != nil {
+		t.Errorf("Disable returned unexpected error: " + err.Error())
+	}
+
+	c, err := config.DefaultLoader.LoadConfigFromFile(profile)
+	if err != nil {
+		t.Errorf("unable to load profile: %v", err)
+	}
+	if c.Addons["dashboard"] != true {
+		t.Errorf("expected dashboard to be enabled")
+	}
+
+	// disable
+	if err := Set("dashboard", "false", profile); err != nil {
+		t.Errorf("Disable returned unexpected error: " + err.Error())
+	}
+
+	c, err = config.DefaultLoader.LoadConfigFromFile(profile)
+	if err != nil {
+		t.Errorf("unable to load profile: %v", err)
+	}
+	if c.Addons["dashboard"] != false {
+		t.Errorf("expected dashboard to be enabled")
+	}
+}
+
+func TestStart(t *testing.T) {
+	profile := createTestProfile(t)
+	Start(profile, map[string]bool{}, []string{"dashboard"})
+
+	enabled, err := assets.Addons["dashboard"].IsEnabled(profile)
+	if err != nil {
+		t.Errorf("dashboard: %v", err)
+	}
+	if !enabled {
+		t.Errorf("expected dashboard to be enabled")
+	}
 }
@@ -30,11 +30,6 @@ type Addon struct {

 // Addons is a list of all addons
 var Addons = []*Addon{
-	{
-		name:      "addon-manager",
-		set:       SetBool,
-		callbacks: []setFn{enableOrDisableAddon},
-	},
 	{
 		name: "dashboard",
 		set:  SetBool,
@@ -0,0 +1,67 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package addons
+
+import (
+	"os/exec"
+	"path"
+
+	"k8s.io/minikube/pkg/minikube/config"
+	"k8s.io/minikube/pkg/minikube/constants"
+)
+
+var (
+	// For testing
+	k8sVersion = kubernetesVersion
+)
+
+func kubectlCommand(profile string, files []string, enable bool) (*exec.Cmd, error) {
+	v, err := k8sVersion(profile)
+	if err != nil {
+		return nil, err
+	}
+	kubectlBinary := kubectlBinaryPath(v)
+
+	kubectlAction := "apply"
+	if !enable {
+		kubectlAction = "delete"
+	}
+
+	args := []string{"KUBECONFIG=/var/lib/minikube/kubeconfig", kubectlBinary, kubectlAction}
+	for _, f := range files {
+		args = append(args, []string{"-f", f}...)
+	}
+
+	cmd := exec.Command("sudo", args...)
+	return cmd, nil
+}
+
+func kubernetesVersion(profile string) (string, error) {
+	cc, err := config.Load(profile)
+	if err != nil && !config.IsNotExist(err) {
+		return "", err
+	}
+	version := constants.DefaultKubernetesVersion
+	if cc != nil {
+		version = cc.KubernetesConfig.KubernetesVersion
+	}
+	return version, nil
+}
+
+func kubectlBinaryPath(version string) string {
+	return path.Join("/var/lib/minikube/binaries", version, "kubectl")
+}
@@ -0,0 +1,63 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package addons
+
+import (
+	"strings"
+	"testing"
+)
+
+func TestKubectlCommand(t *testing.T) {
+	tests := []struct {
+		description string
+		files       []string
+		enable      bool
+		expected    string
+	}{
+		{
+			description: "enable an addon",
+			files:       []string{"a", "b"},
+			enable:      true,
+			expected:    "sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.17.0/kubectl apply -f a -f b",
+		}, {
+			description: "disable an addon",
+			files:       []string{"a", "b"},
+			enable:      false,
+			expected:    "sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.17.0/kubectl delete -f a -f b",
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.description, func(t *testing.T) {
+			originalK8sVersion := k8sVersion
+			defer func() { k8sVersion = originalK8sVersion }()
+			k8sVersion = func(_ string) (string, error) {
+				return "v1.17.0", nil
+			}
+
+			command, err := kubectlCommand("", test.files, test.enable)
+			if err != nil {
+				t.Fatalf("error getting kubectl command: %v", err)
+			}
+			actual := strings.Join(command.Args, " ")
+
+			if actual != test.expected {
+				t.Fatalf("expected does not match actual\nExpected: %s\nActual: %s", test.expected, actual)
+			}
+		})
+	}
+}
@@ -18,29 +18,24 @@ package kic

 import (
 	"fmt"
+	"net"
 	"os/exec"
 	"strconv"
 	"strings"

 	"github.com/docker/machine/libmachine/drivers"
+	"github.com/docker/machine/libmachine/log"
+	"github.com/docker/machine/libmachine/ssh"
 	"github.com/docker/machine/libmachine/state"
+	"github.com/golang/glog"
 	"github.com/pkg/errors"
 	pkgdrivers "k8s.io/minikube/pkg/drivers"
-	"k8s.io/minikube/pkg/drivers/kic/node"
 	"k8s.io/minikube/pkg/drivers/kic/oci"
+	"k8s.io/minikube/pkg/minikube/assets"
 	"k8s.io/minikube/pkg/minikube/command"
 	"k8s.io/minikube/pkg/minikube/constants"
 )

-// DefaultPodCIDR is The CIDR to be used for pods inside the node.
-const DefaultPodCIDR = "10.244.0.0/16"
-
-// DefaultBindIPV4 is The default IP the container will bind to.
-const DefaultBindIPV4 = "127.0.0.1"
-
-// BaseImage is the base image is used to spin up kic containers
-const BaseImage = "gcr.io/k8s-minikube/kicbase:v0.0.1@sha256:c4ad2938877d2ae0d5b7248a5e7182ff58c0603165c3bedfe9d503e2d380a0db"
-
 // Driver represents a kic driver https://minikube.sigs.k8s.io/docs/reference/drivers/kic/
 type Driver struct {
 	*drivers.BaseDriver
@@ -51,20 +46,6 @@ type Driver struct {
 	OCIBinary string // docker,podman
 }

-// Config is configuration for the kic driver used by registry
-type Config struct {
-	MachineName  string            // maps to the container name being created
-	CPU          int               // Number of CPU cores assigned to the container
-	Memory       int               // max memory in MB
-	StorePath    string            // libmachine store path
-	OCIBinary    string            // oci tool to use (docker, podman,...)
-	ImageDigest  string            // image name with sha to use for the node
-	HostBindPort int               // port to connect to forward from container to user's machine
-	Mounts       []oci.Mount       // mounts
-	PortMappings []oci.PortMapping // container port mappings
-	Envs         map[string]string // key,value of environment variables passed to the node
-}
-
 // NewDriver returns a fully configured Kic driver
 func NewDriver(c Config) *Driver {
 	d := &Driver{
@@ -81,28 +62,63 @@ func NewDriver(c Config) *Driver {

 // Create a host using the driver's config
 func (d *Driver) Create() error {
-	params := node.CreateConfig{
+	params := oci.CreateParams{
 		Name:         d.NodeConfig.MachineName,
 		Image:        d.NodeConfig.ImageDigest,
-		ClusterLabel: node.ClusterLabelKey + "=" + d.MachineName,
+		ClusterLabel: oci.ClusterLabelKey + "=" + d.MachineName,
 		CPUs:         strconv.Itoa(d.NodeConfig.CPU),
 		Memory:       strconv.Itoa(d.NodeConfig.Memory) + "mb",
 		Envs:         d.NodeConfig.Envs,
-		ExtraArgs:    []string{"--expose", fmt.Sprintf("%d", d.NodeConfig.HostBindPort)},
+		ExtraArgs:    []string{"--expose", fmt.Sprintf("%d", d.NodeConfig.APIServerPort)},
 		OCIBinary:    d.NodeConfig.OCIBinary,
+		APIServerPort: d.NodeConfig.APIServerPort,
 	}

 	// control plane specific options
 	params.PortMappings = append(params.PortMappings, oci.PortMapping{
-		ListenAddress: "127.0.0.1",
-		HostPort:      int32(d.NodeConfig.HostBindPort),
+		ListenAddress: DefaultBindIPV4,
 		ContainerPort: constants.APIServerPort,
-	})
+	},
+		oci.PortMapping{
+			ListenAddress: DefaultBindIPV4,
+			ContainerPort: constants.SSHPort,
+		},
+		oci.PortMapping{
+			ListenAddress: DefaultBindIPV4,
+			ContainerPort: constants.DockerDaemonPort,
+		},
+	)

-	_, err := node.CreateNode(params)
+	err := oci.CreateContainerNode(params)
 	if err != nil {
 		return errors.Wrap(err, "create kic node")
 	}
+
+	if err := d.prepareSSH(); err != nil {
+		return errors.Wrap(err, "prepare kic ssh")
+	}
+	return nil
+}
+
+// prepareSSH will generate keys and copy to the container so minikube ssh works
+func (d *Driver) prepareSSH() error {
+	keyPath := d.GetSSHKeyPath()
+	glog.Infof("Creating ssh key for kic: %s...", keyPath)
+	if err := ssh.GenerateSSHKey(keyPath); err != nil {
+		return errors.Wrap(err, "generate ssh key")
+	}
+
+	cmder := command.NewKICRunner(d.NodeConfig.MachineName, d.NodeConfig.OCIBinary)
+	f, err := assets.NewFileAsset(d.GetSSHKeyPath()+".pub", "/home/docker/.ssh/", "authorized_keys", "0644")
+	if err != nil {
+		return errors.Wrap(err, "create pubkey assetfile ")
+	}
+	if err := cmder.Copy(f); err != nil {
+		return errors.Wrap(err, "copying pub key")
+	}
+	if rr, err := cmder.RunCmd(exec.Command("chown", "docker:docker", "/home/docker/.ssh/authorized_keys")); err != nil {
+		return errors.Wrapf(err, "apply authorized_keys file ownership, output %s", rr.Output())
+	}

 	return nil
 }
@@ -116,36 +132,63 @@ func (d *Driver) DriverName() string {

 // GetIP returns an IP or hostname that this host is available at
 func (d *Driver) GetIP() (string, error) {
-	node, err := node.Find(d.OCIBinary, d.MachineName, d.exec)
-	if err != nil {
-		return "", fmt.Errorf("ip not found for nil node")
-	}
-	ip, _, err := node.IP()
+	ip, _, err := oci.ContainerIPs(d.OCIBinary, d.MachineName)
 	return ip, err
 }

+// GetExternalIP returns an IP which is accessible from outside
+func (d *Driver) GetExternalIP() (string, error) {
+	return DefaultBindIPV4, nil
+}
+
 // GetSSHHostname returns hostname for use with ssh
 func (d *Driver) GetSSHHostname() (string, error) {
-	return "", fmt.Errorf("driver does not have SSHHostName")
+	return DefaultBindIPV4, nil
 }

 // GetSSHPort returns port for use with ssh
 func (d *Driver) GetSSHPort() (int, error) {
-	return 0, fmt.Errorf("driver does not support GetSSHPort")
+	p, err := oci.HostPortBinding(d.OCIBinary, d.MachineName, constants.SSHPort)
+	if err != nil {
+		return p, errors.Wrap(err, "get ssh host-port")
+	}
+	return p, nil
+}
+
+// GetSSHUsername returns the ssh username
+func (d *Driver) GetSSHUsername() string {
+	return "docker"
+}
+
+// GetSSHKeyPath returns the ssh key path
+func (d *Driver) GetSSHKeyPath() string {
+	if d.SSHKeyPath == "" {
+		d.SSHKeyPath = d.ResolveStorePath("id_rsa")
+	}
+	return d.SSHKeyPath
 }

 // GetURL returns ip of the container running kic control-panel
 func (d *Driver) GetURL() (string, error) {
-	return d.GetIP()
+	p, err := oci.HostPortBinding(d.NodeConfig.OCIBinary, d.MachineName, d.NodeConfig.APIServerPort)
+	url := fmt.Sprintf("https://%s", net.JoinHostPort("127.0.0.1", fmt.Sprint(p)))
+	if err != nil {
+		return url, errors.Wrap(err, "api host port binding")
+	}
+	return url, nil
 }

 // GetState returns the state that the host is in (running, stopped, etc)
 func (d *Driver) GetState() (state.State, error) {
+	if err := oci.PointToHostDockerDaemon(); err != nil {
+		return state.Error, errors.Wrap(err, "point host docker-daemon")
+	}
+
 	cmd := exec.Command(d.NodeConfig.OCIBinary, "inspect", "-f", "{{.State.Status}}", d.MachineName)
 	out, err := cmd.CombinedOutput()
 	o := strings.Trim(string(out), "\n")
 	if err != nil {
-		return state.Error, errors.Wrapf(err, "error stop node %s", d.MachineName)
+		return state.Error, errors.Wrapf(err, "get container %s status", d.MachineName)
 	}
 	switch o {
 	case "running":
|
||||||
|
|
||||||
// Remove will delete the Kic Node Container
|
// Remove will delete the Kic Node Container
|
||||||
func (d *Driver) Remove() error {
|
func (d *Driver) Remove() error {
|
||||||
if _, err := d.nodeID(d.MachineName); err != nil {
|
if _, err := oci.ContainerID(d.OCIBinary, d.MachineName); err != nil {
|
||||||
return errors.Wrapf(err, "not found node %s", d.MachineName)
|
log.Warnf("could not find the container %s to remove it.", d.MachineName)
|
||||||
}
|
}
|
||||||
cmd := exec.Command(d.NodeConfig.OCIBinary, "rm", "-f", "-v", d.MachineName)
|
cmd := exec.Command(d.NodeConfig.OCIBinary, "rm", "-f", "-v", d.MachineName)
|
||||||
if err := cmd.Run(); err != nil {
|
o, err := cmd.CombinedOutput()
|
||||||
return errors.Wrapf(err, "error removing node %s", d.MachineName)
|
out := strings.Trim(string(o), "\n")
|
||||||
|
if err != nil {
|
||||||
|
if strings.Contains(out, "is already in progress") {
|
||||||
|
log.Warnf("Docker engine is stuck. please restart docker daemon on your computer.", d.MachineName)
|
||||||
|
}
|
||||||
|
return errors.Wrapf(err, "removing container %s, output %s", d.MachineName, out)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@@ -248,13 +296,3 @@ func (d *Driver) Stop() error {
 func (d *Driver) RunSSHCommandFromDriver() error {
 	return fmt.Errorf("driver does not support RunSSHCommandFromDriver commands")
 }
-
-// looks up for a container node by name, will return error if not found.
-func (d *Driver) nodeID(nameOrID string) (string, error) {
-	cmd := exec.Command(d.NodeConfig.OCIBinary, "inspect", "-f", "{{.Id}}", nameOrID)
-	id, err := cmd.CombinedOutput()
-	if err != nil {
-		id = []byte{}
-	}
-	return string(id), err
-}
@@ -1,195 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package node
-
-import (
-	"fmt"
-	"os/exec"
-	"path/filepath"
-	"strings"
-
-	"k8s.io/minikube/pkg/drivers/kic/oci"
-	"k8s.io/minikube/pkg/minikube/assets"
-	"k8s.io/minikube/pkg/minikube/command"
-
-	"github.com/pkg/errors"
-)
-
-const (
-	// Docker default bridge network is named "bridge" (https://docs.docker.com/network/bridge/#use-the-default-bridge-network)
-	DefaultNetwork  = "bridge"
-	ClusterLabelKey = "io.x-k8s.kic.cluster" // ClusterLabelKey is applied to each node docker container for identification
-	NodeRoleKey     = "io.k8s.sigs.kic.role"
-)
-
-// Node represents a handle to a kic node
-// This struct must be created by one of: CreateControlPlane
-type Node struct {
-	id        string         // container id
-	name      string         // container name
-	r         command.Runner // Runner
-	ociBinary string
-}
-
-type CreateConfig struct {
-	Name         string            // used for container name and hostname
-	Image        string            // container image to use to create the node.
-	ClusterLabel string            // label the containers we create using minikube so we can clean up
-	Role         string            // currently only role supported is control-plane
-	Mounts       []oci.Mount       // volume mounts
-	PortMappings []oci.PortMapping // ports to map to container from host
-	CPUs         string            // number of cpu cores assign to container
-	Memory       string            // memory (mbs) to assign to the container
-	Envs         map[string]string // environment variables to pass to the container
-	ExtraArgs    []string          // a list of any extra option to pass to oci binary during creation time, for example --expose 8080...
-	OCIBinary    string            // docker or podman
-}
-
-// CreateNode creates a new container node
-func CreateNode(p CreateConfig) (*Node, error) {
-	cmder := command.NewKICRunner(p.Name, p.OCIBinary)
-	runArgs := []string{
-		fmt.Sprintf("--cpus=%s", p.CPUs),
-		fmt.Sprintf("--memory=%s", p.Memory),
-		"-d", // run the container detached
-		"-t", // allocate a tty for entrypoint logs
-		// running containers in a container requires privileged
-		// NOTE: we could try to replicate this with --cap-add, and use less
-		// privileges, but this flag also changes some mounts that are necessary
-		// including some ones docker would otherwise do by default.
-		// for now this is what we want. in the future we may revisit this.
-		"--privileged",
-		"--security-opt", "seccomp=unconfined", // also ignore seccomp
-		"--tmpfs", "/tmp", // various things depend on working /tmp
-		"--tmpfs", "/run", // systemd wants a writable /run
-		// logs,pods be stroed on filesystem vs inside container,
-		"--volume", "/var",
-		// some k8s things want /lib/modules
-		"-v", "/lib/modules:/lib/modules:ro",
-		"--hostname", p.Name, // make hostname match container name
-		"--name", p.Name, // ... and set the container name
-		// label the node with the cluster ID
-		"--label", p.ClusterLabel,
-		// label the node with the role ID
-		"--label", fmt.Sprintf("%s=%s", NodeRoleKey, p.Role),
-	}
-
-	for key, val := range p.Envs {
-		runArgs = append(runArgs, "-e", fmt.Sprintf("%s=%s", key, val))
-	}
-
-	// adds node specific args
-	runArgs = append(runArgs, p.ExtraArgs...)
-
-	if oci.UsernsRemap(p.OCIBinary) {
-		// We need this argument in order to make this command work
-		// in systems that have userns-remap enabled on the docker daemon
-		runArgs = append(runArgs, "--userns=host")
-	}
-
-	_, err := oci.CreateContainer(p.OCIBinary,
-		p.Image,
-		oci.WithRunArgs(runArgs...),
-		oci.WithMounts(p.Mounts),
-		oci.WithPortMappings(p.PortMappings),
-	)
-
-	if err != nil {
-		return nil, errors.Wrap(err, "oci create ")
-	}
-
-	// we should return a handle so the caller can clean it up
-	node, err := Find(p.OCIBinary, p.Name, cmder)
-	if err != nil {
-		return node, errors.Wrap(err, "find node")
-	}
-
-	return node, nil
-}
-
-// Find finds a node
-func Find(ociBinary string, name string, cmder command.Runner) (*Node, error) {
-	n, err := oci.Inspect(ociBinary, name, "{{.Id}}")
-	if err != nil {
-		return nil, fmt.Errorf("can't find node %v", err)
-	}
-	return &Node{
-		ociBinary: ociBinary,
-		id:        n[0],
-		name:      name,
-		r:         cmder,
-	}, nil
-}
-
-// WriteFile writes content to dest on the node
-func (n *Node) WriteFile(dest, content string, perm string) error {
-	// create destination directory
-	cmd := exec.Command("mkdir", "-p", filepath.Dir(dest))
-	rr, err := n.r.RunCmd(cmd)
-	if err != nil {
-		return errors.Wrapf(err, "failed to create directory %s cmd: %v output:%q", cmd.Args, dest, rr.Output())
-	}
-
-	cmd = exec.Command("cp", "/dev/stdin", dest)
-	cmd.Stdin = strings.NewReader(content)
-
-	if rr, err := n.r.RunCmd(cmd); err != nil {
-		return errors.Wrapf(err, "failed to run: cp /dev/stdin %s cmd: %v output:%q", dest, cmd.Args, rr.Output())
-	}
-
-	cmd = exec.Command("chmod", perm, dest)
-	_, err = n.r.RunCmd(cmd)
-	if err != nil {
-		return errors.Wrapf(err, "failed to run: chmod %s %s", perm, dest)
-	}
-	return nil
-}
-
-// IP returns the IP address of the node
-func (n *Node) IP() (ipv4 string, ipv6 string, err error) {
-	// retrieve the IP address of the node using docker inspect
-	lines, err := oci.Inspect(n.ociBinary, n.name, "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}")
-	if err != nil {
-		return "", "", errors.Wrap(err, "node ips")
-	}
-	if len(lines) != 1 {
-		return "", "", errors.Errorf("file should only be one line, got %d lines", len(lines))
-	}
-	ips := strings.Split(lines[0], ",")
-	if len(ips) != 2 {
-		return "", "", errors.Errorf("container addresses should have 2 values, got %d values: %+v", len(ips), ips)
-	}
-	return ips[0], ips[1], nil
-}
-
-// Copy copies a local asset into the node
-func (n *Node) Copy(ociBinary string, asset assets.CopyableFile) error {
-	if err := oci.Copy(ociBinary, n.name, asset); err != nil {
-		return errors.Wrap(err, "failed to copy file/folder")
-	}
-
-	cmd := exec.Command("chmod", asset.GetPermissions(), asset.GetTargetName())
-	if _, err := n.r.RunCmd(cmd); err != nil {
-		return errors.Wrap(err, "failed to chmod file permissions")
-	}
-	return nil
-}
-
-// Remove removes the node
-func (n *Node) Remove() error {
-	return oci.Remove(n.ociBinary, n.name)
-}
@ -18,96 +18,205 @@ package oci
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
"os"
|
||||||
|
"strconv"
|
||||||
"github.com/docker/machine/libmachine/state"
|
|
||||||
"k8s.io/minikube/pkg/minikube/assets"
|
|
||||||
|
|
||||||
"bufio"
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
|
|
||||||
|
"github.com/golang/glog"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"k8s.io/minikube/pkg/minikube/constants"
|
||||||
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/cenkalti/backoff"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Stop stops a container
|
// CreateContainerNode creates a new container node
|
||||||
func Stop(ociBinary, ociID string) error {
|
func CreateContainerNode(p CreateParams) error {
|
||||||
cmd := exec.Command(ociBinary, "stop", ociID)
|
if err := PointToHostDockerDaemon(); err != nil {
|
||||||
|
return errors.Wrap(err, "point host docker-daemon")
|
||||||
|
}
|
||||||
|
|
||||||
|
runArgs := []string{
|
||||||
|
fmt.Sprintf("--cpus=%s", p.CPUs),
|
||||||
|
fmt.Sprintf("--memory=%s", p.Memory),
|
||||||
|
"-d", // run the container detached
|
||||||
|
"-t", // allocate a tty for entrypoint logs
|
||||||
|
// running containers in a container requires privileged
|
||||||
|
// NOTE: we could try to replicate this with --cap-add, and use less
|
||||||
|
// privileges, but this flag also changes some mounts that are necessary
|
||||||
|
// including some ones docker would otherwise do by default.
|
||||||
|
// for now this is what we want. in the future we may revisit this.
|
||||||
|
"--privileged",
|
||||||
|
"--security-opt", "seccomp=unconfined", // also ignore seccomp
|
||||||
|
"--tmpfs", "/tmp", // various things depend on working /tmp
|
||||||
|
"--tmpfs", "/run", // systemd wants a writable /run
|
||||||
|
// logs,pods be stroed on filesystem vs inside container,
|
||||||
|
"--volume", "/var",
|
||||||
|
// some k8s things want /lib/modules
|
||||||
|
"-v", "/lib/modules:/lib/modules:ro",
|
||||||
|
"--hostname", p.Name, // make hostname match container name
|
||||||
|
"--name", p.Name, // ... and set the container name
|
||||||
|
// label the node with the cluster ID
|
||||||
|
"--label", p.ClusterLabel,
|
||||||
|
// label the node with the role ID
|
||||||
|
"--label", fmt.Sprintf("%s=%s", nodeRoleKey, p.Role),
|
||||||
|
}
|
||||||
|
|
||||||
|
for key, val := range p.Envs {
|
||||||
|
runArgs = append(runArgs, "-e", fmt.Sprintf("%s=%s", key, val))
|
||||||
|
}
|
||||||
|
|
||||||
|
// adds node specific args
|
||||||
|
runArgs = append(runArgs, p.ExtraArgs...)
|
||||||
|
|
||||||
|
enabled, err := isUsernsRemapEnabled(p.OCIBinary)
|
||||||
|
if err != nil {
|
||||||
|
glog.Warningf("Failed to detect if userns is enabled: %v", err)
|
||||||
|
}
|
||||||
|
if enabled {
|
||||||
|
// We need this argument in order to make this command work
|
||||||
|
// in systems that have userns-remap enabled on the docker daemon
|
||||||
|
runArgs = append(runArgs, "--userns=host")
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = createContainer(p.OCIBinary,
|
||||||
|
p.Image,
|
||||||
|
withRunArgs(runArgs...),
|
||||||
|
withMounts(p.Mounts),
|
||||||
|
withPortMappings(p.PortMappings),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "create a kic node")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateContainer creates a container with "docker/podman run"
|
||||||
|
func createContainer(ociBinary string, image string, opts ...createOpt) ([]string, error) {
|
||||||
|
if err := PointToHostDockerDaemon(); err != nil {
|
||||||
|
return nil, errors.Wrap(err, "point host docker-daemon")
|
||||||
|
}
|
||||||
|
|
||||||
|
o := &createOpts{}
|
||||||
|
for _, opt := range opts {
|
||||||
|
o = opt(o)
|
||||||
|
}
|
||||||
|
// convert mounts to container run args
|
||||||
|
runArgs := o.RunArgs
|
||||||
|
for _, mount := range o.Mounts {
|
||||||
|
runArgs = append(runArgs, generateMountBindings(mount)...)
|
||||||
|
}
|
||||||
|
for _, portMapping := range o.PortMappings {
|
||||||
|
runArgs = append(runArgs, generatePortMappings(portMapping)...)
|
||||||
|
}
|
||||||
|
// construct the actual docker run argv
|
||||||
|
args := []string{"run"}
|
||||||
|
args = append(args, runArgs...)
|
||||||
|
args = append(args, image)
|
||||||
|
args = append(args, o.ContainerArgs...)
|
||||||
|
cmd := exec.Command(ociBinary, args...)
|
||||||
|
var buff bytes.Buffer
|
||||||
|
cmd.Stdout = &buff
|
||||||
|
cmd.Stderr = &buff
|
||||||
|
err := cmd.Run()
|
||||||
|
scanner := bufio.NewScanner(&buff)
|
||||||
|
var output []string
|
||||||
|
for scanner.Scan() {
|
||||||
|
output = append(output, scanner.Text())
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return output, errors.Wrapf(err, "args: %v output: %s ", args, output)
|
||||||
|
}
|
||||||
|
return output, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy copies a local asset into the container
|
||||||
|
func Copy(ociBinary string, ociID string, targetDir string, fName string) error {
|
||||||
|
if err := PointToHostDockerDaemon(); err != nil {
|
||||||
|
return errors.Wrap(err, "point host docker-daemon")
|
||||||
|
}
|
||||||
|
if _, err := os.Stat(fName); os.IsNotExist(err) {
|
||||||
|
return errors.Wrapf(err, "error source %s does not exist", fName)
|
||||||
|
}
|
||||||
|
destination := fmt.Sprintf("%s:%s", ociID, targetDir)
|
||||||
|
cmd := exec.Command(ociBinary, "cp", fName, destination)
|
||||||
err := cmd.Run()
|
err := cmd.Run()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(err, "error stop node %s", ociID)
|
return errors.Wrapf(err, "error copying %s into node", fName)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// HostPortBinding will return port mapping for a container using cli.
// example : HostPortBinding("docker", "minikube", "22")
// will return the docker assigned port:
// 32769, nil
// only supports TCP ports
func HostPortBinding(ociBinary string, ociID string, contPort int) (int, error) {
	if err := PointToHostDockerDaemon(); err != nil {
		return 0, errors.Wrap(err, "point host docker-daemon")
	}
	cmd := exec.Command(ociBinary, "inspect", "-f", fmt.Sprintf("'{{(index (index .NetworkSettings.Ports \"%d/tcp\") 0).HostPort}}'", contPort), ociID)
	out, err := cmd.CombinedOutput()
	if err != nil {
		return 0, errors.Wrapf(err, "getting host-bind port %d for container ID %q, output %s", contPort, ociID, out)
	}
	o := strings.Trim(string(out), "\n")
	o = strings.Trim(o, "'")
	p, err := strconv.Atoi(o)
	if err != nil {
		return p, errors.Wrapf(err, "convert host-port %q to number", p)
	}
	return p, nil
}

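
The host-port lookup above passes a Go template to docker inspect and then trims the quotes and newline before converting to an int. A small standalone sketch of that parsing step, using a sample of what docker typically prints:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// sample output of: docker inspect -f '{{(index (index .NetworkSettings.Ports "8443/tcp") 0).HostPort}}' minikube
	raw := "'32769'\n"
	o := strings.Trim(raw, "\n")
	o = strings.Trim(o, "'")
	p, err := strconv.Atoi(o)
	if err != nil {
		panic(err)
	}
	fmt.Println(p) // 32769
}
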
// ContainerIPs returns ipv4,ipv6, error of a container by their name
func ContainerIPs(ociBinary string, name string) (string, string, error) {
	if err := PointToHostDockerDaemon(); err != nil {
		return "", "", errors.Wrap(err, "point host docker-daemon")
	}
	// retrieve the IP address of the node using docker inspect
	lines, err := inspect(ociBinary, name, "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}")
	if err != nil {
		return "", "", errors.Wrap(err, "inspecting NetworkSettings.Networks")
	}
	if len(lines) != 1 {
		return "", "", errors.Errorf("IPs output should only be one line, got %d lines", len(lines))
	}
	ips := strings.Split(lines[0], ",")
	if len(ips) != 2 {
		return "", "", errors.Errorf("container addresses should have 2 values, got %d values: %+v", len(ips), ips)
	}
	return ips[0], ips[1], nil
}

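
ContainerIPs expects exactly one line of the form ipv4,ipv6 from the inspect template; a quick sketch of that split with sample (made-up) addresses:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// sample single line produced by the NetworkSettings.Networks template above
	line := "172.17.0.3,fc00:f853:ccd:e793::3"
	ips := strings.Split(line, ",")
	if len(ips) != 2 {
		panic("expected ipv4,ipv6")
	}
	fmt.Println("ipv4:", ips[0], "ipv6:", ips[1])
}
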
// ContainerID returns id of a container name
func ContainerID(ociBinary string, nameOrID string) (string, error) {
	if err := PointToHostDockerDaemon(); err != nil {
		return "", errors.Wrap(err, "point host docker-daemon")
	}
	cmd := exec.Command(ociBinary, "inspect", "-f", "{{.Id}}", nameOrID)
	id, err := cmd.CombinedOutput()
	if err != nil {
		id = []byte{}
	}
	return string(id), err
}

// Remove removes a container
|
// ListOwnedContainers lists all the containers that the kic driver created on the user's machine, using a label
|
||||||
func Remove(ociBinary string, ociID string) error {
|
func ListOwnedContainers(ociBinary string) ([]string, error) {
|
||||||
// TODO: force remove should be an option
|
return listContainersByLabel(ociBinary, ClusterLabelKey)
|
||||||
cmd := exec.Command(ociBinary, "rm", "-f", "-v", ociID)
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
return errors.Wrapf(err, "error removing node %s", ociID)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Pause pauses a container
|
// inspect return low-level information on containers
|
||||||
func Pause(ociBinary string, ociID string) error {
|
func inspect(ociBinary string, containerNameOrID, format string) ([]string, error) {
|
||||||
cmd := exec.Command(ociBinary, "pause", ociID)
|
if err := PointToHostDockerDaemon(); err != nil {
|
||||||
if err := cmd.Run(); err != nil {
|
return nil, errors.Wrap(err, "point host docker-daemon")
|
||||||
return errors.Wrapf(err, "error pausing node %s", ociID)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Inspect return low-level information on containers
|
|
||||||
func Inspect(ociBinary string, containerNameOrID, format string) ([]string, error) {
|
|
||||||
cmd := exec.Command(ociBinary, "inspect",
|
cmd := exec.Command(ociBinary, "inspect",
|
||||||
"-f", format,
|
"-f", format,
|
||||||
containerNameOrID) // ... against the "node" container
|
containerNameOrID) // ... against the "node" container
|
||||||
|
@ -123,65 +232,6 @@ func Inspect(ociBinary string, containerNameOrID, format string) ([]string, erro
|
||||||
return lines, err
|
return lines, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// NetworkInspect displays detailed information on one or more networks
|
|
||||||
func NetworkInspect(networkNames []string, format string) ([]string, error) {
|
|
||||||
cmd := exec.Command("docker", "network", "inspect",
|
|
||||||
"-f", format,
|
|
||||||
strings.Join(networkNames, " "))
|
|
||||||
var buff bytes.Buffer
|
|
||||||
cmd.Stdout = &buff
|
|
||||||
cmd.Stderr = &buff
|
|
||||||
err := cmd.Run()
|
|
||||||
scanner := bufio.NewScanner(&buff)
|
|
||||||
var lines []string
|
|
||||||
for scanner.Scan() {
|
|
||||||
lines = append(lines, scanner.Text())
|
|
||||||
}
|
|
||||||
return lines, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetSubnets returns a slice of subnets for a specified network name
|
|
||||||
// For example the command : docker network inspect -f '{{range (index (index . "IPAM") "Config")}}{{index . "Subnet"}} {{end}}' bridge
|
|
||||||
// returns 172.17.0.0/16
|
|
||||||
func GetSubnets(networkName string) ([]string, error) {
|
|
||||||
format := `{{range (index (index . "IPAM") "Config")}}{{index . "Subnet"}} {{end}}`
|
|
||||||
lines, err := NetworkInspect([]string{networkName}, format)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return strings.Split(lines[0], " "), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ImageInspect return low-level information on containers images
|
|
||||||
func ImageInspect(containerNameOrID, format string) ([]string, error) {
|
|
||||||
cmd := exec.Command("docker", "image", "inspect",
|
|
||||||
"-f", format,
|
|
||||||
containerNameOrID,
|
|
||||||
)
|
|
||||||
var buff bytes.Buffer
|
|
||||||
cmd.Stdout = &buff
|
|
||||||
cmd.Stderr = &buff
|
|
||||||
err := cmd.Run()
|
|
||||||
scanner := bufio.NewScanner(&buff)
|
|
||||||
var lines []string
|
|
||||||
for scanner.Scan() {
|
|
||||||
lines = append(lines, scanner.Text())
|
|
||||||
}
|
|
||||||
return lines, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ImageID return the Id of the container image
|
|
||||||
func ImageID(containerNameOrID string) (string, error) {
|
|
||||||
lines, err := ImageInspect(containerNameOrID, "{{ .Id }}")
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if len(lines) != 1 {
|
|
||||||
return "", fmt.Errorf("docker image ID should only be one line, got %d lines", len(lines))
|
|
||||||
}
|
|
||||||
return lines[0], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
This is adapted from:
https://github.com/kubernetes/kubernetes/blob/07a5488b2a8f67add543da72e8819407d8314204/pkg/kubelet/dockershim/helpers.go#L115-L155
|
||||||
|
@ -228,33 +278,11 @@ func generateMountBindings(mounts ...Mount) []string {
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
// PullIfNotPresent pulls docker image if not present back off exponentially
|
// isUsernsRemapEnabled checks if userns-remap is enabled in docker
|
||||||
func PullIfNotPresent(ociBinary string, image string, forceUpdate bool, maxWait time.Duration) error {
|
func isUsernsRemapEnabled(ociBinary string) (bool, error) {
|
||||||
cmd := exec.Command(ociBinary, "inspect", "--type=image", image)
|
if err := PointToHostDockerDaemon(); err != nil {
|
||||||
err := cmd.Run()
|
return false, errors.Wrap(err, "point host docker-daemon")
|
||||||
if err == nil && !forceUpdate {
|
|
||||||
return nil // if presents locally and not force
|
|
||||||
}
|
}
|
||||||
b := backoff.NewExponentialBackOff()
|
|
||||||
b.MaxElapsedTime = maxWait
|
|
||||||
f := func() error {
|
|
||||||
return pull(ociBinary, image)
|
|
||||||
}
|
|
||||||
return backoff.Retry(f, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Pull pulls an image, retrying up to retries times
|
|
||||||
func pull(ociBinary string, image string) error {
|
|
||||||
cmd := exec.Command(ociBinary, "pull", image)
|
|
||||||
err := cmd.Run()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("error pull image %s : %v", image, err)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// UsernsRemap checks if userns-remap is enabled in dockerd
|
|
||||||
func UsernsRemap(ociBinary string) bool {
|
|
||||||
cmd := exec.Command(ociBinary, "info", "--format", "'{{json .SecurityOptions}}'")
|
cmd := exec.Command(ociBinary, "info", "--format", "'{{json .SecurityOptions}}'")
|
||||||
var buff bytes.Buffer
|
var buff bytes.Buffer
|
||||||
cmd.Stdout = &buff
|
cmd.Stdout = &buff
|
||||||
|
@ -266,131 +294,86 @@ func UsernsRemap(ociBinary string) bool {
|
||||||
lines = append(lines, scanner.Text())
|
lines = append(lines, scanner.Text())
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false
|
return false, nil
|
||||||
}
|
}
|
||||||
if len(lines) > 0 {
|
if len(lines) > 0 {
|
||||||
if strings.Contains(lines[0], "name=userns") {
|
if strings.Contains(lines[0], "name=userns") {
|
||||||
return true
|
return true, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return false
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func generatePortMappings(portMappings ...PortMapping) []string {
|
func generatePortMappings(portMappings ...PortMapping) []string {
|
||||||
result := make([]string, 0, len(portMappings))
|
result := make([]string, 0, len(portMappings))
|
||||||
for _, pm := range portMappings {
|
for _, pm := range portMappings {
|
||||||
var hostPortBinding string
|
// let docker pick a host port by leaving it as ::
|
||||||
if pm.ListenAddress != "" {
|
// example --publish=127.0.0.17::8443 will get a random host port for 8443
|
||||||
hostPortBinding = net.JoinHostPort(pm.ListenAddress, fmt.Sprintf("%d", pm.HostPort))
|
publish := fmt.Sprintf("--publish=%s::%d", pm.ListenAddress, pm.ContainerPort)
|
||||||
} else {
|
|
||||||
hostPortBinding = fmt.Sprintf("%d", pm.HostPort)
|
|
||||||
}
|
|
||||||
publish := fmt.Sprintf("--publish=%s:%d", hostPortBinding, pm.ContainerPort)
|
|
||||||
result = append(result, publish)
|
result = append(result, publish)
|
||||||
}
|
}
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
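
The new --publish form above deliberately leaves the host-port slot empty so the container engine picks a free port (which HostPortBinding can later read back via docker inspect); a short sketch of the flag being generated, with illustrative values:

package main

import "fmt"

func main() {
	listenAddress := "127.0.0.1" // illustrative
	containerPort := 8443        // illustrative
	// leaving the middle (host port) slot empty lets docker choose a random free port
	publish := fmt.Sprintf("--publish=%s::%d", listenAddress, containerPort)
	fmt.Println(publish) // --publish=127.0.0.1::8443
}
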
|
|
||||||
// Save saves an image archive "docker/podman save"
|
// withRunArgs sets the args for docker run
|
||||||
func Save(ociBinary string, image, dest string) error {
|
|
||||||
cmd := exec.Command(ociBinary, "save", "-o", dest, image)
|
|
||||||
var buff bytes.Buffer
|
|
||||||
cmd.Stdout = &buff
|
|
||||||
cmd.Stderr = &buff
|
|
||||||
err := cmd.Run()
|
|
||||||
scanner := bufio.NewScanner(&buff)
|
|
||||||
var lines []string
|
|
||||||
for scanner.Scan() {
|
|
||||||
lines = append(lines, scanner.Text())
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrapf(err, "saving image to tar failed, output %s", lines[0])
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateOpt is an option for Create
|
|
||||||
type CreateOpt func(*createOpts) *createOpts
|
|
||||||
|
|
||||||
// actual options struct
|
|
||||||
type createOpts struct {
|
|
||||||
RunArgs []string
|
|
||||||
ContainerArgs []string
|
|
||||||
Mounts []Mount
|
|
||||||
PortMappings []PortMapping
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateContainer creates a container with "docker/podman run"
|
|
||||||
func CreateContainer(ociBinary string, image string, opts ...CreateOpt) ([]string, error) {
|
|
||||||
o := &createOpts{}
|
|
||||||
for _, opt := range opts {
|
|
||||||
o = opt(o)
|
|
||||||
}
|
|
||||||
// convert mounts to container run args
|
|
||||||
runArgs := o.RunArgs
|
|
||||||
for _, mount := range o.Mounts {
|
|
||||||
runArgs = append(runArgs, generateMountBindings(mount)...)
|
|
||||||
}
|
|
||||||
for _, portMapping := range o.PortMappings {
|
|
||||||
runArgs = append(runArgs, generatePortMappings(portMapping)...)
|
|
||||||
}
|
|
||||||
// construct the actual docker run argv
|
|
||||||
args := []string{"run"}
|
|
||||||
args = append(args, runArgs...)
|
|
||||||
args = append(args, image)
|
|
||||||
args = append(args, o.ContainerArgs...)
|
|
||||||
cmd := exec.Command(ociBinary, args...)
|
|
||||||
var buff bytes.Buffer
|
|
||||||
cmd.Stdout = &buff
|
|
||||||
cmd.Stderr = &buff
|
|
||||||
err := cmd.Run()
|
|
||||||
scanner := bufio.NewScanner(&buff)
|
|
||||||
var output []string
|
|
||||||
for scanner.Scan() {
|
|
||||||
output = append(output, scanner.Text())
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return output, errors.Wrapf(err, "args: %v output: %s ", args, output)
|
|
||||||
}
|
|
||||||
return output, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithRunArgs sets the args for docker run
|
|
||||||
// as in the args portion of `docker run args... image containerArgs...`
|
// as in the args portion of `docker run args... image containerArgs...`
|
||||||
func WithRunArgs(args ...string) CreateOpt {
|
func withRunArgs(args ...string) createOpt {
|
||||||
return func(r *createOpts) *createOpts {
|
return func(r *createOpts) *createOpts {
|
||||||
r.RunArgs = args
|
r.RunArgs = args
|
||||||
return r
|
return r
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithMounts sets the container mounts
|
// withMounts sets the container mounts
|
||||||
func WithMounts(mounts []Mount) CreateOpt {
|
func withMounts(mounts []Mount) createOpt {
|
||||||
return func(r *createOpts) *createOpts {
|
return func(r *createOpts) *createOpts {
|
||||||
r.Mounts = mounts
|
r.Mounts = mounts
|
||||||
return r
|
return r
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithPortMappings sets the container port mappings to the host
|
// withPortMappings sets the container port mappings to the host
|
||||||
func WithPortMappings(portMappings []PortMapping) CreateOpt {
|
func withPortMappings(portMappings []PortMapping) createOpt {
|
||||||
return func(r *createOpts) *createOpts {
|
return func(r *createOpts) *createOpts {
|
||||||
r.PortMappings = portMappings
|
r.PortMappings = portMappings
|
||||||
return r
|
return r
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy copies a local asset into the container
|
// listContainersByLabel lists all the containers that the kic driver created on the user's machine, using a label
|
||||||
func Copy(ociBinary string, ociID string, asset assets.CopyableFile) error {
|
// io.x-k8s.kic.cluster
|
||||||
if _, err := os.Stat(asset.GetAssetName()); os.IsNotExist(err) {
|
func listContainersByLabel(ociBinary string, label string) ([]string, error) {
|
||||||
return errors.Wrapf(err, "error source %s does not exist", asset.GetAssetName())
|
if err := PointToHostDockerDaemon(); err != nil {
|
||||||
|
return nil, errors.Wrap(err, "point host docker-daemon")
|
||||||
}
|
}
|
||||||
destination := fmt.Sprintf("%s:%s", ociID, asset.GetTargetDir())
|
cmd := exec.Command(ociBinary, "ps", "-a", "--filter", fmt.Sprintf("label=%s", label), "--format", "{{.Names}}")
|
||||||
cmd := exec.Command(ociBinary, "cp", asset.GetAssetName(), destination)
|
var b bytes.Buffer
|
||||||
|
cmd.Stdout = &b
|
||||||
|
cmd.Stderr = &b
|
||||||
err := cmd.Run()
|
err := cmd.Run()
|
||||||
if err != nil {
|
var lines []string
|
||||||
return errors.Wrapf(err, "error copying %s into node", asset.GetAssetName())
|
sc := bufio.NewScanner(&b)
|
||||||
|
for sc.Scan() {
|
||||||
|
lines = append(lines, sc.Text())
|
||||||
|
}
|
||||||
|
return lines, err
|
||||||
|
}
|
||||||
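
listContainersByLabel scans the line-per-name output of docker ps; a minimal sketch of that scanning step over canned output (the container names are made up):

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// sample output of: docker ps -a --filter label=io.x-k8s.kic.cluster --format {{.Names}}
	out := "minikube\nsecond-cluster\n"
	var names []string
	sc := bufio.NewScanner(strings.NewReader(out))
	for sc.Scan() {
		names = append(names, sc.Text())
	}
	fmt.Println(names) // [minikube second-cluster]
}
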
|
|
||||||
|
// PointToHostDockerDaemon will unset env variables that point to docker inside minikube
|
||||||
|
// to make sure it points to the docker daemon installed by user.
|
||||||
|
func PointToHostDockerDaemon() error {
|
||||||
|
p := os.Getenv(constants.MinikubeActiveDockerdEnv)
|
||||||
|
if p != "" {
|
||||||
|
glog.Infof("shell is pointing to docker inside minikube. will unset to use host")
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range constants.DockerDaemonEnvs {
|
||||||
|
e := constants.DockerDaemonEnvs[i]
|
||||||
|
err := os.Setenv(e, "")
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "resetting %s env", e)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
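
PointToHostDockerDaemon clears the docker-env variables so subsequent commands reach the host daemon; the exact variable list lives in constants.DockerDaemonEnvs, which is not shown here, so the names below are the usual docker-machine set and are an assumption:

package main

import (
	"fmt"
	"os"
)

func main() {
	// assumed set of docker-env variables; the real list comes from constants.DockerDaemonEnvs
	for _, e := range []string{"DOCKER_HOST", "DOCKER_TLS_VERIFY", "DOCKER_CERT_PATH"} {
		if err := os.Setenv(e, ""); err != nil {
			fmt.Printf("resetting %s: %v\n", e, err)
		}
	}
}
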
||||||
|
|
|
@ -19,8 +19,38 @@ package oci
|
||||||
const (
	Docker = "docker"
	Podman = "podman"
	// ClusterLabelKey is applied to each node docker container for identification
	ClusterLabelKey = "io.x-k8s.kic.cluster"
	// NodeRoleKey is used to identify if it is control plane or worker
	nodeRoleKey = "io.k8s.sigs.kic.role"
)

type CreateParams struct {
	Name          string            // used for container name and hostname
	Image         string            // container image to use to create the node.
	ClusterLabel  string            // label the containers we create using minikube so we can clean up
	Role          string            // currently only role supported is control-plane
	Mounts        []Mount           // volume mounts
	APIServerPort int               // kubernetes api server port
	PortMappings  []PortMapping     // ports to map to container from host
	CPUs          string            // number of cpu cores assign to container
	Memory        string            // memory (mbs) to assign to the container
	Envs          map[string]string // environment variables to pass to the container
	ExtraArgs     []string          // a list of any extra option to pass to oci binary during creation time, for example --expose 8080...
	OCIBinary     string            // docker or podman
}

// createOpt is an option for Create
type createOpt func(*createOpts) *createOpts

// actual options struct
type createOpts struct {
	RunArgs       []string
	ContainerArgs []string
	Mounts        []Mount
	PortMappings  []PortMapping
}
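
A hedged sketch of filling in CreateParams for a single control-plane node; every value here is illustrative rather than taken from this change:

package main

import "k8s.io/minikube/pkg/drivers/kic/oci"

func main() {
	// illustrative values only; they are assumptions, not defaults from this PR
	p := oci.CreateParams{
		Name:          "minikube",
		Image:         "gcr.io/k8s-minikube/kicbase:v0.0.5",
		ClusterLabel:  oci.ClusterLabelKey + "=minikube",
		Role:          "control-plane",
		APIServerPort: 8443,
		CPUs:          "2",
		Memory:        "2000mb",
		OCIBinary:     oci.Docker,
	}
	_ = p
}
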
||||||
|
|
||||||
/*
|
/*
|
||||||
These types are from
|
These types are from
|
||||||
https://github.com/kubernetes/kubernetes/blob/063e7ff358fdc8b0916e6f39beedc0d025734cb1/pkg/kubelet/apis/cri/runtime/v1alpha2/api.pb.go#L183
|
https://github.com/kubernetes/kubernetes/blob/063e7ff358fdc8b0916e6f39beedc0d025734cb1/pkg/kubelet/apis/cri/runtime/v1alpha2/api.pb.go#L183
|
||||||
|
|
|
@ -0,0 +1,47 @@
|
||||||
|
/*
|
||||||
|
Copyright 2019 The Kubernetes Authors All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package kic
|
||||||
|
|
||||||
|
import "k8s.io/minikube/pkg/drivers/kic/oci"
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Docker default bridge network is named "bridge" (https://docs.docker.com/network/bridge/#use-the-default-bridge-network)
|
||||||
|
DefaultNetwork = "bridge"
|
||||||
|
// DefaultPodCIDR is The CIDR to be used for pods inside the node.
|
||||||
|
DefaultPodCIDR = "10.244.0.0/16"
|
||||||
|
// DefaultBindIPV4 is The default IP the container will bind to.
|
||||||
|
DefaultBindIPV4 = "127.0.0.1"
|
||||||
|
// BaseImage is the base image used to spin up kic containers. It uses the same base image as kind.
|
||||||
|
BaseImage = "gcr.io/k8s-minikube/kicbase:v0.0.5@sha256:3ddd8461dfb5c3e452ccc44d87750b87a574ec23fc425da67dccc1f0c57d428a"
|
||||||
|
// OverlayImage is the cni plugin used for overlay image, created by kind.
|
||||||
|
// CNI plugin image used for kic drivers created by kind.
|
||||||
|
OverlayImage = "kindest/kindnetd:0.5.3"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Config is configuration for the kic driver used by registry
|
||||||
|
type Config struct {
|
||||||
|
MachineName string // maps to the container name being created
|
||||||
|
CPU int // Number of CPU cores assigned to the container
|
||||||
|
Memory int // max memory in MB
|
||||||
|
StorePath string // libmachine store path
|
||||||
|
OCIBinary string // oci tool to use (docker, podman,...)
|
||||||
|
ImageDigest string // image name with sha to use for the node
|
||||||
|
Mounts []oci.Mount // mounts
|
||||||
|
APIServerPort int // kubernetes api server port inside the container
|
||||||
|
PortMappings []oci.PortMapping // container port mappings
|
||||||
|
Envs map[string]string // key,value of environment variables passed to the node
|
||||||
|
}
|
|
@ -18,6 +18,7 @@ package none
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
@ -26,10 +27,13 @@ import (
|
||||||
"github.com/docker/machine/libmachine/state"
|
"github.com/docker/machine/libmachine/state"
|
||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"k8s.io/apimachinery/pkg/util/net"
|
knet "k8s.io/apimachinery/pkg/util/net"
|
||||||
pkgdrivers "k8s.io/minikube/pkg/drivers"
|
pkgdrivers "k8s.io/minikube/pkg/drivers"
|
||||||
|
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
|
||||||
"k8s.io/minikube/pkg/minikube/command"
|
"k8s.io/minikube/pkg/minikube/command"
|
||||||
|
"k8s.io/minikube/pkg/minikube/constants"
|
||||||
"k8s.io/minikube/pkg/minikube/cruntime"
|
"k8s.io/minikube/pkg/minikube/cruntime"
|
||||||
|
"k8s.io/minikube/pkg/minikube/kubeconfig"
|
||||||
"k8s.io/minikube/pkg/minikube/vmpath"
|
"k8s.io/minikube/pkg/minikube/vmpath"
|
||||||
"k8s.io/minikube/pkg/util/retry"
|
"k8s.io/minikube/pkg/util/retry"
|
||||||
)
|
)
|
||||||
|
@ -60,7 +64,7 @@ type Config struct {
|
||||||
|
|
||||||
// NewDriver returns a fully configured None driver
|
// NewDriver returns a fully configured None driver
|
||||||
func NewDriver(c Config) *Driver {
|
func NewDriver(c Config) *Driver {
|
||||||
runner := &command.ExecRunner{}
|
runner := command.NewExecRunner()
|
||||||
runtime, err := cruntime.New(cruntime.Config{Type: c.ContainerRuntime, Runner: runner})
|
runtime, err := cruntime.New(cruntime.Config{Type: c.ContainerRuntime, Runner: runner})
|
||||||
// Libraries shouldn't panic, but there is no way for drivers to return error :(
|
// Libraries shouldn't panic, but there is no way for drivers to return error :(
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -94,7 +98,7 @@ func (d *Driver) DriverName() string {
|
||||||
|
|
||||||
// GetIP returns an IP or hostname that this host is available at
|
// GetIP returns an IP or hostname that this host is available at
|
||||||
func (d *Driver) GetIP() (string, error) {
|
func (d *Driver) GetIP() (string, error) {
|
||||||
ip, err := net.ChooseBindAddress(nil)
|
ip, err := knet.ChooseHostInterface()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
@ -123,11 +127,30 @@ func (d *Driver) GetURL() (string, error) {
|
||||||
|
|
||||||
// GetState returns the state that the host is in (running, stopped, etc)
|
// GetState returns the state that the host is in (running, stopped, etc)
|
||||||
func (d *Driver) GetState() (state.State, error) {
|
func (d *Driver) GetState() (state.State, error) {
|
||||||
if err := checkKubelet(d.exec); err != nil {
|
glog.Infof("GetState called")
|
||||||
glog.Infof("kubelet not running: %v", err)
|
ip, err := d.GetIP()
|
||||||
return state.Stopped, nil
|
if err != nil {
|
||||||
|
return state.Error, err
|
||||||
}
|
}
|
||||||
return state.Running, nil
|
|
||||||
|
port, err := kubeconfig.Port(d.BaseDriver.MachineName)
|
||||||
|
if err != nil {
|
||||||
|
glog.Warningf("unable to get port: %v", err)
|
||||||
|
port = constants.APIServerPort
|
||||||
|
}
|
||||||
|
|
||||||
|
// Confusing logic, as libmachine.Stop will loop until the state == Stopped
|
||||||
|
ast, err := kverify.APIServerStatus(d.exec, net.ParseIP(ip), port)
|
||||||
|
if err != nil {
|
||||||
|
return ast, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the apiserver is up, we'll claim to be up.
|
||||||
|
if ast == state.Paused || ast == state.Running {
|
||||||
|
return state.Running, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return kverify.KubeletStatus(d.exec)
|
||||||
}
|
}
|
||||||
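
The reworked GetState above asks the apiserver first and only falls back to the kubelet; a rough, hedged sketch of that decision order with stubbed status functions (the stubs stand in for the real kverify calls):

package main

import (
	"fmt"

	"github.com/docker/machine/libmachine/state"
)

// stubs standing in for kverify.APIServerStatus / kverify.KubeletStatus
func apiServerStatus() (state.State, error) { return state.Running, nil }
func kubeletStatus() (state.State, error)   { return state.Stopped, nil }

func main() {
	ast, err := apiServerStatus()
	if err != nil {
		fmt.Println(state.Error, err)
		return
	}
	// if the apiserver is up (or paused), report the host as running
	if ast == state.Paused || ast == state.Running {
		fmt.Println(state.Running)
		return
	}
	// otherwise fall back to the kubelet's status
	st, _ := kubeletStatus()
	fmt.Println(st)
}
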
|
|
||||||
// Kill stops a host forcefully, including any containers that we are managing.
|
// Kill stops a host forcefully, including any containers that we are managing.
|
||||||
|
@ -137,7 +160,7 @@ func (d *Driver) Kill() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// First try to gracefully stop containers
|
// First try to gracefully stop containers
|
||||||
containers, err := d.runtime.ListContainers("")
|
containers, err := d.runtime.ListContainers(cruntime.ListOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "containers")
|
return errors.Wrap(err, "containers")
|
||||||
}
|
}
|
||||||
|
@ -149,7 +172,7 @@ func (d *Driver) Kill() error {
|
||||||
return errors.Wrap(err, "stop")
|
return errors.Wrap(err, "stop")
|
||||||
}
|
}
|
||||||
|
|
||||||
containers, err = d.runtime.ListContainers("")
|
containers, err = d.runtime.ListContainers(cruntime.ListOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "containers")
|
return errors.Wrap(err, "containers")
|
||||||
}
|
}
|
||||||
|
@ -197,17 +220,18 @@ func (d *Driver) Start() error {
|
||||||
// Stop a host gracefully, including any containers that we are managing.
|
// Stop a host gracefully, including any containers that we are managing.
|
||||||
func (d *Driver) Stop() error {
|
func (d *Driver) Stop() error {
|
||||||
if err := stopKubelet(d.exec); err != nil {
|
if err := stopKubelet(d.exec); err != nil {
|
||||||
return err
|
return errors.Wrap(err, "stop kubelet")
|
||||||
}
|
}
|
||||||
containers, err := d.runtime.ListContainers("")
|
containers, err := d.runtime.ListContainers(cruntime.ListOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "containers")
|
return errors.Wrap(err, "containers")
|
||||||
}
|
}
|
||||||
if len(containers) > 0 {
|
if len(containers) > 0 {
|
||||||
if err := d.runtime.StopContainers(containers); err != nil {
|
if err := d.runtime.StopContainers(containers); err != nil {
|
||||||
return errors.Wrap(err, "stop")
|
return errors.Wrap(err, "stop containers")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
glog.Infof("none driver is stopped!")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -251,13 +275,3 @@ func restartKubelet(cr command.Runner) error {
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// checkKubelet returns an error if the kubelet is not running.
|
|
||||||
func checkKubelet(cr command.Runner) error {
|
|
||||||
glog.Infof("checking for running kubelet ...")
|
|
||||||
c := exec.Command("systemctl", "is-active", "--quiet", "service", "kubelet")
|
|
||||||
if _, err := cr.RunCmd(c); err != nil {
|
|
||||||
return errors.Wrap(err, "check kubelet")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
|
@ -17,19 +17,13 @@ limitations under the License.
|
||||||
package assets
|
package assets
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
"runtime"
|
||||||
|
|
||||||
|
"github.com/golang/glog"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/spf13/viper"
|
|
||||||
"k8s.io/minikube/pkg/minikube/config"
|
"k8s.io/minikube/pkg/minikube/config"
|
||||||
"k8s.io/minikube/pkg/minikube/constants"
|
"k8s.io/minikube/pkg/minikube/constants"
|
||||||
"k8s.io/minikube/pkg/minikube/localpath"
|
|
||||||
"k8s.io/minikube/pkg/minikube/vmpath"
|
"k8s.io/minikube/pkg/minikube/vmpath"
|
||||||
"k8s.io/minikube/pkg/util"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Addon is a named list of assets, that can be enabled
|
// Addon is a named list of assets, that can be enabled
|
||||||
|
@ -54,34 +48,34 @@ func (a *Addon) Name() string {
|
||||||
return a.addonName
|
return a.addonName
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsEnabled checks if an Addon is enabled for the current profile
|
// IsEnabled checks if an Addon is enabled for the given profile
|
||||||
func (a *Addon) IsEnabled() (bool, error) {
|
func (a *Addon) IsEnabled(profile string) (bool, error) {
|
||||||
c, err := config.Load(viper.GetString(config.MachineProfile))
|
c, err := config.Load(profile)
|
||||||
if err == nil {
|
if err != nil {
|
||||||
if status, ok := c.Addons[a.Name()]; ok {
|
return false, errors.Wrap(err, "load")
|
||||||
return status, nil
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Is this addon explicitly listed in their configuration?
|
||||||
|
status, ok := c.Addons[a.Name()]
|
||||||
|
glog.V(1).Infof("IsEnabled %q = %v (listed in config=%v)", a.Name(), status, ok)
|
||||||
|
if ok {
|
||||||
|
return status, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return the default unconfigured state of the addon
|
||||||
return a.enabled, nil
|
return a.enabled, nil
|
||||||
}
|
}
|
||||||
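
With the profile argument added to IsEnabled, callers must say which cluster they mean; a hedged usage sketch ("minikube" is an assumed profile name):

package main

import (
	"fmt"

	"k8s.io/minikube/pkg/minikube/assets"
)

func main() {
	for name, addon := range assets.Addons {
		enabled, err := addon.IsEnabled("minikube")
		if err != nil {
			fmt.Printf("%s: could not read config: %v\n", name, err)
			continue
		}
		fmt.Printf("%s: enabled=%v\n", name, enabled)
	}
}
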
|
|
||||||
// Addons is the list of addons
|
// Addons is the list of addons
|
||||||
// TODO: Make dynamically loadable: move this data to a .yaml file within each addon directory
|
// TODO: Make dynamically loadable: move this data to a .yaml file within each addon directory
|
||||||
var Addons = map[string]*Addon{
|
var Addons = map[string]*Addon{
|
||||||
"addon-manager": NewAddon([]*BinAsset{
|
|
||||||
MustBinAsset(
|
|
||||||
"deploy/addons/addon-manager.yaml.tmpl",
|
|
||||||
vmpath.GuestManifestsDir,
|
|
||||||
"addon-manager.yaml.tmpl",
|
|
||||||
"0640",
|
|
||||||
true),
|
|
||||||
}, true, "addon-manager"),
|
|
||||||
"dashboard": NewAddon([]*BinAsset{
|
"dashboard": NewAddon([]*BinAsset{
|
||||||
|
// We want to create the kubernetes-dashboard ns first so that every subsequent object can be created
|
||||||
|
MustBinAsset("deploy/addons/dashboard/dashboard-ns.yaml", vmpath.GuestAddonsDir, "dashboard-ns.yaml", "0640", false),
|
||||||
MustBinAsset("deploy/addons/dashboard/dashboard-clusterrole.yaml", vmpath.GuestAddonsDir, "dashboard-clusterrole.yaml", "0640", false),
|
MustBinAsset("deploy/addons/dashboard/dashboard-clusterrole.yaml", vmpath.GuestAddonsDir, "dashboard-clusterrole.yaml", "0640", false),
|
||||||
MustBinAsset("deploy/addons/dashboard/dashboard-clusterrolebinding.yaml", vmpath.GuestAddonsDir, "dashboard-clusterrolebinding.yaml", "0640", false),
|
MustBinAsset("deploy/addons/dashboard/dashboard-clusterrolebinding.yaml", vmpath.GuestAddonsDir, "dashboard-clusterrolebinding.yaml", "0640", false),
|
||||||
MustBinAsset("deploy/addons/dashboard/dashboard-configmap.yaml", vmpath.GuestAddonsDir, "dashboard-configmap.yaml", "0640", false),
|
MustBinAsset("deploy/addons/dashboard/dashboard-configmap.yaml", vmpath.GuestAddonsDir, "dashboard-configmap.yaml", "0640", false),
|
||||||
MustBinAsset("deploy/addons/dashboard/dashboard-dp.yaml", vmpath.GuestAddonsDir, "dashboard-dp.yaml", "0640", false),
|
MustBinAsset("deploy/addons/dashboard/dashboard-dp.yaml", vmpath.GuestAddonsDir, "dashboard-dp.yaml", "0640", false),
|
||||||
MustBinAsset("deploy/addons/dashboard/dashboard-ns.yaml", vmpath.GuestAddonsDir, "dashboard-ns.yaml", "0640", false),
|
|
||||||
MustBinAsset("deploy/addons/dashboard/dashboard-role.yaml", vmpath.GuestAddonsDir, "dashboard-role.yaml", "0640", false),
|
MustBinAsset("deploy/addons/dashboard/dashboard-role.yaml", vmpath.GuestAddonsDir, "dashboard-role.yaml", "0640", false),
|
||||||
MustBinAsset("deploy/addons/dashboard/dashboard-rolebinding.yaml", vmpath.GuestAddonsDir, "dashboard-rolebinding.yaml", "0640", false),
|
MustBinAsset("deploy/addons/dashboard/dashboard-rolebinding.yaml", vmpath.GuestAddonsDir, "dashboard-rolebinding.yaml", "0640", false),
|
||||||
MustBinAsset("deploy/addons/dashboard/dashboard-sa.yaml", vmpath.GuestAddonsDir, "dashboard-sa.yaml", "0640", false),
|
MustBinAsset("deploy/addons/dashboard/dashboard-sa.yaml", vmpath.GuestAddonsDir, "dashboard-sa.yaml", "0640", false),
|
||||||
|
@ -340,60 +334,6 @@ var Addons = map[string]*Addon{
|
||||||
}, false, "ingress-dns"),
|
}, false, "ingress-dns"),
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddMinikubeDirAssets adds all addons and files to the list
|
|
||||||
// of files to be copied to the vm.
|
|
||||||
func AddMinikubeDirAssets(assets *[]CopyableFile) error {
|
|
||||||
if err := addMinikubeDirToAssets(localpath.MakeMiniPath("addons"), vmpath.GuestAddonsDir, assets); err != nil {
|
|
||||||
return errors.Wrap(err, "adding addons folder to assets")
|
|
||||||
}
|
|
||||||
if err := addMinikubeDirToAssets(localpath.MakeMiniPath("files"), "", assets); err != nil {
|
|
||||||
return errors.Wrap(err, "adding files rootfs to assets")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddMinikubeDirToAssets adds all the files in the basedir argument to the list
|
|
||||||
// of files to be copied to the vm. If vmpath is left blank, the files will be
|
|
||||||
// transferred to the location according to their relative minikube folder path.
|
|
||||||
func addMinikubeDirToAssets(basedir, vmpath string, assets *[]CopyableFile) error {
|
|
||||||
return filepath.Walk(basedir, func(hostpath string, info os.FileInfo, err error) error {
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
isDir, err := util.IsDirectory(hostpath)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrapf(err, "checking if %s is directory", hostpath)
|
|
||||||
}
|
|
||||||
if !isDir {
|
|
||||||
vmdir := vmpath
|
|
||||||
if vmdir == "" {
|
|
||||||
rPath, err := filepath.Rel(basedir, hostpath)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "generating relative path")
|
|
||||||
}
|
|
||||||
rPath = filepath.Dir(rPath)
|
|
||||||
rPath = filepath.ToSlash(rPath)
|
|
||||||
vmdir = path.Join("/", rPath)
|
|
||||||
}
|
|
||||||
permString := fmt.Sprintf("%o", info.Mode().Perm())
|
|
||||||
// The conversion will strip the leading 0 if present, so add it back
|
|
||||||
// if we need to.
|
|
||||||
if len(permString) == 3 {
|
|
||||||
permString = fmt.Sprintf("0%s", permString)
|
|
||||||
}
|
|
||||||
|
|
||||||
f, err := NewFileAsset(hostpath, vmdir, filepath.Base(hostpath), permString)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrapf(err, "creating file asset for %s", hostpath)
|
|
||||||
}
|
|
||||||
*assets = append(*assets, f)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GenerateTemplateData generates template data for template assets
|
// GenerateTemplateData generates template data for template assets
|
||||||
func GenerateTemplateData(cfg config.KubernetesConfig) interface{} {
|
func GenerateTemplateData(cfg config.KubernetesConfig) interface{} {
|
||||||
|
|
||||||
|
|
|
@ -38,6 +38,7 @@ type CopyableFile interface {
|
||||||
GetTargetName() string
|
GetTargetName() string
|
||||||
GetPermissions() string
|
GetPermissions() string
|
||||||
GetModTime() (time.Time, error)
|
GetModTime() (time.Time, error)
|
||||||
|
Seek(int64, int) (int64, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// BaseAsset is the base asset class
|
// BaseAsset is the base asset class
|
||||||
|
@ -76,7 +77,7 @@ func (b *BaseAsset) GetModTime() (time.Time, error) {
|
||||||
// FileAsset is an asset using a file
|
// FileAsset is an asset using a file
|
||||||
type FileAsset struct {
|
type FileAsset struct {
|
||||||
BaseAsset
|
BaseAsset
|
||||||
reader io.Reader
|
reader io.ReadSeeker
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewMemoryAssetTarget creates a new MemoryAsset, with target
|
// NewMemoryAssetTarget creates a new MemoryAsset, with target
|
||||||
|
@ -91,6 +92,11 @@ func NewFileAsset(src, targetDir, targetName, permissions string) (*FileAsset, e
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrapf(err, "Error opening file asset: %s", src)
|
return nil, errors.Wrapf(err, "Error opening file asset: %s", src)
|
||||||
}
|
}
|
||||||
|
info, err := os.Stat(src)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "Error getting info for %s", src)
|
||||||
|
}
|
||||||
|
r := io.NewSectionReader(f, 0, info.Size())
|
||||||
return &FileAsset{
|
return &FileAsset{
|
||||||
BaseAsset: BaseAsset{
|
BaseAsset: BaseAsset{
|
||||||
AssetName: src,
|
AssetName: src,
|
||||||
|
@ -98,7 +104,7 @@ func NewFileAsset(src, targetDir, targetName, permissions string) (*FileAsset, e
|
||||||
TargetName: targetName,
|
TargetName: targetName,
|
||||||
Permissions: permissions,
|
Permissions: permissions,
|
||||||
},
|
},
|
||||||
reader: f,
|
reader: r,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -117,6 +123,7 @@ func (f *FileAsset) GetModTime() (time.Time, error) {
|
||||||
return fi.ModTime(), err
|
return fi.ModTime(), err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Read reads the asset
|
||||||
func (f *FileAsset) Read(p []byte) (int, error) {
|
func (f *FileAsset) Read(p []byte) (int, error) {
|
||||||
if f.reader == nil {
|
if f.reader == nil {
|
||||||
return 0, errors.New("Error attempting FileAsset.Read, FileAsset.reader uninitialized")
|
return 0, errors.New("Error attempting FileAsset.Read, FileAsset.reader uninitialized")
|
||||||
|
@ -124,10 +131,15 @@ func (f *FileAsset) Read(p []byte) (int, error) {
|
||||||
return f.reader.Read(p)
|
return f.reader.Read(p)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Seek resets the reader to offset
|
||||||
|
func (f *FileAsset) Seek(offset int64, whence int) (int64, error) {
|
||||||
|
return f.reader.Seek(offset, whence)
|
||||||
|
}
|
||||||
|
|
||||||
// MemoryAsset is a memory-based asset
|
// MemoryAsset is a memory-based asset
|
||||||
type MemoryAsset struct {
|
type MemoryAsset struct {
|
||||||
BaseAsset
|
BaseAsset
|
||||||
reader io.Reader
|
reader io.ReadSeeker
|
||||||
length int
|
length int
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -141,6 +153,11 @@ func (m *MemoryAsset) Read(p []byte) (int, error) {
|
||||||
return m.reader.Read(p)
|
return m.reader.Read(p)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Seek resets the reader to offset
|
||||||
|
func (m *MemoryAsset) Seek(offset int64, whence int) (int64, error) {
|
||||||
|
return m.reader.Seek(offset, whence)
|
||||||
|
}
|
||||||
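
Switching the asset readers to io.ReadSeeker (and adding the Seek methods above) lets a caller rewind an asset and copy it again, for example on a retry; a minimal sketch of that pattern with a plain strings.Reader standing in for an asset:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	var r io.ReadSeeker = strings.NewReader("asset payload")
	first, _ := ioutil.ReadAll(r)
	// rewind so the same asset can be read (copied) a second time
	if _, err := r.Seek(0, io.SeekStart); err != nil {
		panic(err)
	}
	second, _ := ioutil.ReadAll(r)
	fmt.Println(string(first) == string(second)) // true
}
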
|
|
||||||
// NewMemoryAsset creates a new MemoryAsset
|
// NewMemoryAsset creates a new MemoryAsset
|
||||||
func NewMemoryAsset(d []byte, targetDir, targetName, permissions string) *MemoryAsset {
|
func NewMemoryAsset(d []byte, targetDir, targetName, permissions string) *MemoryAsset {
|
||||||
return &MemoryAsset{
|
return &MemoryAsset{
|
||||||
|
@ -157,7 +174,7 @@ func NewMemoryAsset(d []byte, targetDir, targetName, permissions string) *Memory
|
||||||
// BinAsset is a bindata (binary data) asset
|
// BinAsset is a bindata (binary data) asset
|
||||||
type BinAsset struct {
|
type BinAsset struct {
|
||||||
BaseAsset
|
BaseAsset
|
||||||
reader io.Reader
|
reader io.ReadSeeker
|
||||||
template *template.Template
|
template *template.Template
|
||||||
length int
|
length int
|
||||||
}
|
}
|
||||||
|
@ -253,3 +270,8 @@ func (m *BinAsset) Read(p []byte) (int, error) {
|
||||||
}
|
}
|
||||||
return m.reader.Read(p)
|
return m.reader.Read(p)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Seek resets the reader to offset
|
||||||
|
func (m *BinAsset) Seek(offset int64, whence int) (int64, error) {
|
||||||
|
return m.reader.Seek(offset, whence)
|
||||||
|
}
|
||||||
|
|
|
@ -37,13 +37,13 @@ type LogOptions struct {
|
||||||
type Bootstrapper interface {
|
type Bootstrapper interface {
|
||||||
// PullImages pulls images necessary for a cluster. Success should not be required.
|
// PullImages pulls images necessary for a cluster. Success should not be required.
|
||||||
PullImages(config.KubernetesConfig) error
|
PullImages(config.KubernetesConfig) error
|
||||||
StartCluster(config.KubernetesConfig) error
|
StartCluster(config.MachineConfig) error
|
||||||
UpdateCluster(config.MachineConfig) error
|
UpdateCluster(config.MachineConfig) error
|
||||||
DeleteCluster(config.KubernetesConfig) error
|
DeleteCluster(config.KubernetesConfig) error
|
||||||
WaitForCluster(config.KubernetesConfig, time.Duration) error
|
WaitForCluster(config.MachineConfig, time.Duration) error
|
||||||
// LogCommands returns a map of log type to a command which will display that log.
|
// LogCommands returns a map of log type to a command which will display that log.
|
||||||
LogCommands(LogOptions) map[string]string
|
LogCommands(LogOptions) map[string]string
|
||||||
SetupCerts(cfg config.KubernetesConfig) error
|
SetupCerts(config.KubernetesConfig, config.Node) error
|
||||||
GetKubeletStatus() (string, error)
|
GetKubeletStatus() (string, error)
|
||||||
GetAPIServerStatus(net.IP, int) (string, error)
|
GetAPIServerStatus(net.IP, int) (string, error)
|
||||||
}
|
}
|
||||||
|
@ -51,12 +51,11 @@ type Bootstrapper interface {
|
||||||
const (
|
const (
|
||||||
// Kubeadm is the kubeadm bootstrapper type
|
// Kubeadm is the kubeadm bootstrapper type
|
||||||
Kubeadm = "kubeadm"
|
Kubeadm = "kubeadm"
|
||||||
KIC = "kic"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// GetCachedBinaryList returns the list of binaries
|
// GetCachedBinaryList returns the list of binaries
|
||||||
func GetCachedBinaryList(bootstrapper string) []string {
|
func GetCachedBinaryList(bootstrapper string) []string {
|
||||||
return constants.KubeadmBinaries
|
return constants.KubernetesReleaseBinaries
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetCachedImageList returns the list of images for a version
|
// GetCachedImageList returns the list of images for a version
|
||||||
|
|
|
@ -18,6 +18,7 @@ limitations under the License.
|
||||||
package bsutil
|
package bsutil
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"os/exec"
|
||||||
"path"
|
"path"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
|
||||||
|
@ -32,8 +33,14 @@ import (
|
||||||
|
|
||||||
// TransferBinaries transfers all required Kubernetes binaries
|
// TransferBinaries transfers all required Kubernetes binaries
|
||||||
func TransferBinaries(cfg config.KubernetesConfig, c command.Runner) error {
|
func TransferBinaries(cfg config.KubernetesConfig, c command.Runner) error {
|
||||||
|
dir := binRoot(cfg.KubernetesVersion)
|
||||||
|
_, err := c.RunCmd(exec.Command("sudo", "mkdir", "-p", dir))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
var g errgroup.Group
|
var g errgroup.Group
|
||||||
for _, name := range constants.KubeadmBinaries {
|
for _, name := range constants.KubernetesReleaseBinaries {
|
||||||
name := name
|
name := name
|
||||||
g.Go(func() error {
|
g.Go(func() error {
|
||||||
src, err := machine.CacheBinary(name, cfg.KubernetesVersion, "linux", runtime.GOARCH)
|
src, err := machine.CacheBinary(name, cfg.KubernetesVersion, "linux", runtime.GOARCH)
|
||||||
|
@ -41,7 +48,7 @@ func TransferBinaries(cfg config.KubernetesConfig, c command.Runner) error {
|
||||||
return errors.Wrapf(err, "downloading %s", name)
|
return errors.Wrapf(err, "downloading %s", name)
|
||||||
}
|
}
|
||||||
|
|
||||||
dst := path.Join(binRoot(cfg.KubernetesVersion), name)
|
dst := path.Join(dir, name)
|
||||||
if err := machine.CopyBinary(c, src, dst); err != nil {
|
if err := machine.CopyBinary(c, src, dst); err != nil {
|
||||||
return errors.Wrapf(err, "copybinary %s -> %s", src, dst)
|
return errors.Wrapf(err, "copybinary %s -> %s", src, dst)
|
||||||
}
|
}
|
||||||
|
|
|
@ -20,7 +20,6 @@ package bsutil
|
||||||
import (
|
import (
|
||||||
"path"
|
"path"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"k8s.io/minikube/pkg/minikube/assets"
|
"k8s.io/minikube/pkg/minikube/assets"
|
||||||
"k8s.io/minikube/pkg/minikube/config"
|
"k8s.io/minikube/pkg/minikube/config"
|
||||||
"k8s.io/minikube/pkg/minikube/vmpath"
|
"k8s.io/minikube/pkg/minikube/vmpath"
|
||||||
|
@ -52,33 +51,3 @@ func ConfigFileAssets(cfg config.KubernetesConfig, kubeadm []byte, kubelet []byt
|
||||||
}
|
}
|
||||||
return fs
|
return fs
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddAddons adds addons to list of files
|
|
||||||
func AddAddons(files *[]assets.CopyableFile, data interface{}) error {
|
|
||||||
// add addons to file list
|
|
||||||
// custom addons
|
|
||||||
if err := assets.AddMinikubeDirAssets(files); err != nil {
|
|
||||||
return errors.Wrap(err, "adding minikube dir assets")
|
|
||||||
}
|
|
||||||
// bundled addons
|
|
||||||
for _, addonBundle := range assets.Addons {
|
|
||||||
if isEnabled, err := addonBundle.IsEnabled(); err == nil && isEnabled {
|
|
||||||
for _, addon := range addonBundle.Assets {
|
|
||||||
if addon.IsTemplate() {
|
|
||||||
addonFile, err := addon.Evaluate(data)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrapf(err, "evaluate bundled addon %s asset", addon.GetAssetName())
|
|
||||||
}
|
|
||||||
|
|
||||||
*files = append(*files, addonFile)
|
|
||||||
} else {
|
|
||||||
*files = append(*files, addon)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
|
@ -48,7 +48,7 @@ kind: ClusterConfiguration
|
||||||
{{$i}}: {{$val}}{{end}}
|
{{$i}}: {{$val}}{{end}}
|
||||||
{{end -}}
|
{{end -}}
|
||||||
certificatesDir: {{.CertDir}}
|
certificatesDir: {{.CertDir}}
|
||||||
clusterName: kubernetes
|
clusterName: {{.ClusterName}}
|
||||||
apiServerCertSANs: ["127.0.0.1", "localhost", "{{.AdvertiseAddress}}"]
|
apiServerCertSANs: ["127.0.0.1", "localhost", "{{.AdvertiseAddress}}"]
|
||||||
controlPlaneEndpoint: localhost:{{.APIServerPort}}
|
controlPlaneEndpoint: localhost:{{.APIServerPort}}
|
||||||
etcd:
|
etcd:
|
||||||
|
|
|
@ -51,7 +51,7 @@ kind: ClusterConfiguration
|
||||||
{{range $i, $val := .FeatureArgs}}{{$i}}: {{$val}}
|
{{range $i, $val := .FeatureArgs}}{{$i}}: {{$val}}
|
||||||
{{end -}}{{end -}}
|
{{end -}}{{end -}}
|
||||||
certificatesDir: {{.CertDir}}
|
certificatesDir: {{.CertDir}}
|
||||||
clusterName: kubernetes
|
clusterName: {{.ClusterName}}
|
||||||
apiServer:
|
apiServer:
|
||||||
certSANs: ["127.0.0.1", "localhost", "{{.AdvertiseAddress}}"]
|
certSANs: ["127.0.0.1", "localhost", "{{.AdvertiseAddress}}"]
|
||||||
controlPlaneEndpoint: localhost:{{.APIServerPort}}
|
controlPlaneEndpoint: localhost:{{.APIServerPort}}
|
||||||
|
@ -60,6 +60,8 @@ dns:
|
||||||
etcd:
|
etcd:
|
||||||
local:
|
local:
|
||||||
dataDir: {{.EtcdDataDir}}
|
dataDir: {{.EtcdDataDir}}
|
||||||
|
extraArgs:
|
||||||
|
listen-metrics-urls: http://127.0.0.1:2381,http://{{.AdvertiseAddress}}:2381
|
||||||
kubernetesVersion: {{.KubernetesVersion}}
|
kubernetesVersion: {{.KubernetesVersion}}
|
||||||
networking:
|
networking:
|
||||||
dnsDomain: {{if .DNSDomain}}{{.DNSDomain}}{{else}}cluster.local{{end}}
|
dnsDomain: {{if .DNSDomain}}{{.DNSDomain}}{{else}}cluster.local{{end}}
|
||||||
|
@ -73,4 +75,8 @@ evictionHard:
|
||||||
nodefs.available: "0%"
|
nodefs.available: "0%"
|
||||||
nodefs.inodesFree: "0%"
|
nodefs.inodesFree: "0%"
|
||||||
imagefs.available: "0%"
|
imagefs.available: "0%"
|
||||||
|
---
|
||||||
|
apiVersion: kubeproxy.config.k8s.io/v1alpha1
|
||||||
|
kind: KubeProxyConfiguration
|
||||||
|
metricsBindAddress: {{.AdvertiseAddress}}:10249
|
||||||
`))
|
`))
|
||||||
|
|
|
@ -35,7 +35,8 @@ import (
|
||||||
const remoteContainerRuntime = "remote"
|
const remoteContainerRuntime = "remote"
|
||||||
|
|
||||||
// GenerateKubeadmYAML generates the kubeadm.yaml file
|
// GenerateKubeadmYAML generates the kubeadm.yaml file
|
||||||
func GenerateKubeadmYAML(k8s config.KubernetesConfig, r cruntime.Manager) ([]byte, error) {
|
func GenerateKubeadmYAML(mc config.MachineConfig, r cruntime.Manager) ([]byte, error) {
|
||||||
|
k8s := mc.KubernetesConfig
|
||||||
version, err := ParseKubernetesVersion(k8s.KubernetesVersion)
|
version, err := ParseKubernetesVersion(k8s.KubernetesVersion)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "parsing kubernetes version")
|
return nil, errors.Wrap(err, "parsing kubernetes version")
|
||||||
|
@ -53,7 +54,11 @@ func GenerateKubeadmYAML(k8s config.KubernetesConfig, r cruntime.Manager) ([]byt
|
||||||
}
|
}
|
||||||
|
|
||||||
// In case of no port assigned, use default
|
// In case of no port assigned, use default
|
||||||
nodePort := k8s.NodePort
|
cp, err := config.PrimaryControlPlane(mc)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "getting control plane")
|
||||||
|
}
|
||||||
|
nodePort := cp.Port
|
||||||
if nodePort <= 0 {
|
if nodePort <= 0 {
|
||||||
nodePort = constants.APIServerPort
|
nodePort = constants.APIServerPort
|
||||||
}
|
}
|
||||||
|
@ -66,6 +71,7 @@ func GenerateKubeadmYAML(k8s config.KubernetesConfig, r cruntime.Manager) ([]byt
|
||||||
APIServerPort int
|
APIServerPort int
|
||||||
KubernetesVersion string
|
KubernetesVersion string
|
||||||
EtcdDataDir string
|
EtcdDataDir string
|
||||||
|
ClusterName string
|
||||||
NodeName string
|
NodeName string
|
||||||
DNSDomain string
|
DNSDomain string
|
||||||
CRISocket string
|
CRISocket string
|
||||||
|
@ -77,11 +83,12 @@ func GenerateKubeadmYAML(k8s config.KubernetesConfig, r cruntime.Manager) ([]byt
|
||||||
CertDir: vmpath.GuestCertsDir,
|
CertDir: vmpath.GuestCertsDir,
|
||||||
ServiceCIDR: constants.DefaultServiceCIDR,
|
ServiceCIDR: constants.DefaultServiceCIDR,
|
||||||
PodSubnet: k8s.ExtraOptions.Get("pod-network-cidr", Kubeadm),
|
PodSubnet: k8s.ExtraOptions.Get("pod-network-cidr", Kubeadm),
|
||||||
AdvertiseAddress: k8s.NodeIP,
|
AdvertiseAddress: cp.IP,
|
||||||
APIServerPort: nodePort,
|
APIServerPort: nodePort,
|
||||||
KubernetesVersion: k8s.KubernetesVersion,
|
KubernetesVersion: k8s.KubernetesVersion,
|
||||||
EtcdDataDir: EtcdDataDir(),
|
EtcdDataDir: EtcdDataDir(),
|
||||||
NodeName: k8s.NodeName,
|
ClusterName: k8s.ClusterName,
|
||||||
|
NodeName: cp.Name,
|
||||||
CRISocket: r.SocketPath(),
|
CRISocket: r.SocketPath(),
|
||||||
ImageRepository: k8s.ImageRepository,
|
ImageRepository: k8s.ImageRepository,
|
||||||
ExtraArgs: extraComponentConfig,
|
ExtraArgs: extraComponentConfig,
|
||||||
|
|
|
@ -106,9 +106,9 @@ func TestGenerateKubeadmYAMLDNS(t *testing.T) {
|
||||||
name string
|
name string
|
||||||
runtime string
|
runtime string
|
||||||
shouldErr bool
|
shouldErr bool
|
||||||
cfg config.KubernetesConfig
|
cfg config.MachineConfig
|
||||||
}{
|
}{
|
||||||
{"dns", "docker", false, config.KubernetesConfig{DNSDomain: "1.1.1.1"}},
|
{"dns", "docker", false, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{DNSDomain: "1.1.1.1"}}},
|
||||||
}
|
}
|
||||||
for _, version := range versions {
|
for _, version := range versions {
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
|
@ -119,9 +119,15 @@ func TestGenerateKubeadmYAMLDNS(t *testing.T) {
|
||||||
tname := tc.name + "_" + version
|
tname := tc.name + "_" + version
|
||||||
t.Run(tname, func(t *testing.T) {
|
t.Run(tname, func(t *testing.T) {
|
||||||
cfg := tc.cfg
|
cfg := tc.cfg
|
||||||
cfg.NodeIP = "1.1.1.1"
|
cfg.Nodes = []config.Node{
|
||||||
cfg.NodeName = "mk"
|
{
|
||||||
cfg.KubernetesVersion = version + ".0"
|
IP: "1.1.1.1",
|
||||||
|
Name: "mk",
|
||||||
|
ControlPlane: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
cfg.KubernetesConfig.KubernetesVersion = version + ".0"
|
||||||
|
cfg.KubernetesConfig.ClusterName = "kubernetes"
|
||||||
|
|
||||||
got, err := GenerateKubeadmYAML(cfg, runtime)
|
got, err := GenerateKubeadmYAML(cfg, runtime)
|
||||||
if err != nil && !tc.shouldErr {
|
if err != nil && !tc.shouldErr {
|
||||||
|
@ -166,17 +172,17 @@ func TestGenerateKubeadmYAML(t *testing.T) {
|
||||||
name string
|
name string
|
||||||
runtime string
|
runtime string
|
||||||
shouldErr bool
|
shouldErr bool
|
||||||
cfg config.KubernetesConfig
|
cfg config.MachineConfig
|
||||||
}{
|
}{
|
||||||
{"default", "docker", false, config.KubernetesConfig{}},
|
{"default", "docker", false, config.MachineConfig{}},
|
||||||
{"containerd", "containerd", false, config.KubernetesConfig{}},
|
{"containerd", "containerd", false, config.MachineConfig{}},
|
||||||
{"crio", "crio", false, config.KubernetesConfig{}},
|
{"crio", "crio", false, config.MachineConfig{}},
|
||||||
{"options", "docker", false, config.KubernetesConfig{ExtraOptions: extraOpts}},
|
{"options", "docker", false, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts}}},
|
||||||
{"crio-options-gates", "crio", false, config.KubernetesConfig{ExtraOptions: extraOpts, FeatureGates: "a=b"}},
|
{"crio-options-gates", "crio", false, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts, FeatureGates: "a=b"}}},
|
||||||
{"unknown-component", "docker", true, config.KubernetesConfig{ExtraOptions: config.ExtraOptionSlice{config.ExtraOption{Component: "not-a-real-component", Key: "killswitch", Value: "true"}}}},
|
{"unknown-component", "docker", true, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: config.ExtraOptionSlice{config.ExtraOption{Component: "not-a-real-component", Key: "killswitch", Value: "true"}}}}},
|
||||||
{"containerd-api-port", "containerd", false, config.KubernetesConfig{NodePort: 12345}},
|
{"containerd-api-port", "containerd", false, config.MachineConfig{Nodes: []config.Node{{Port: 12345}}}},
|
||||||
{"containerd-pod-network-cidr", "containerd", false, config.KubernetesConfig{ExtraOptions: extraOptsPodCidr}},
|
{"containerd-pod-network-cidr", "containerd", false, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOptsPodCidr}}},
|
||||||
{"image-repository", "docker", false, config.KubernetesConfig{ImageRepository: "test/repo"}},
|
{"image-repository", "docker", false, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{ImageRepository: "test/repo"}}},
|
||||||
}
|
}
|
||||||
for _, version := range versions {
|
for _, version := range versions {
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
|
@ -187,9 +193,22 @@ func TestGenerateKubeadmYAML(t *testing.T) {
|
||||||
tname := tc.name + "_" + version
|
tname := tc.name + "_" + version
|
||||||
t.Run(tname, func(t *testing.T) {
|
t.Run(tname, func(t *testing.T) {
|
||||||
cfg := tc.cfg
|
cfg := tc.cfg
|
||||||
cfg.NodeIP = "1.1.1.1"
|
|
||||||
cfg.NodeName = "mk"
|
if len(cfg.Nodes) > 0 {
|
||||||
cfg.KubernetesVersion = version + ".0"
|
cfg.Nodes[0].IP = "1.1.1.1"
|
||||||
|
cfg.Nodes[0].Name = "mk"
|
||||||
|
cfg.Nodes[0].ControlPlane = true
|
||||||
|
} else {
|
||||||
|
cfg.Nodes = []config.Node{
|
||||||
|
{
|
||||||
|
IP: "1.1.1.1",
|
||||||
|
Name: "mk",
|
||||||
|
ControlPlane: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cfg.KubernetesConfig.KubernetesVersion = version + ".0"
|
||||||
|
cfg.KubernetesConfig.ClusterName = "kubernetes"
|
||||||
|
|
||||||
got, err := GenerateKubeadmYAML(cfg, runtime)
|
got, err := GenerateKubeadmYAML(cfg, runtime)
|
||||||
if err != nil && !tc.shouldErr {
|
if err != nil && !tc.shouldErr {
|
||||||
|
|
|
@@ -30,7 +30,8 @@ import (

 // NewKubeletConfig generates a new systemd unit containing a configured kubelet
 // based on the options present in the KubernetesConfig.
-func NewKubeletConfig(k8s config.KubernetesConfig, r cruntime.Manager) ([]byte, error) {
+func NewKubeletConfig(mc config.MachineConfig, nc config.Node, r cruntime.Manager) ([]byte, error) {
+	k8s := mc.KubernetesConfig
 	version, err := ParseKubernetesVersion(k8s.KubernetesVersion)
 	if err != nil {
 		return nil, errors.Wrap(err, "parsing kubernetes version")
@@ -52,8 +53,15 @@ func NewKubeletConfig(k8s config.KubernetesConfig, r cruntime.Manager) ([]byte,
 	if k8s.NetworkPlugin != "" {
 		extraOpts["network-plugin"] = k8s.NetworkPlugin
 	}
+	cp, err := config.PrimaryControlPlane(mc)
+	if err != nil {
+		return nil, errors.Wrap(err, "getting master node")
+	}
 	if _, ok := extraOpts["node-ip"]; !ok {
-		extraOpts["node-ip"] = k8s.NodeIP
+		extraOpts["node-ip"] = cp.IP
+	}
+	if nc.Name != "" {
+		extraOpts["hostname-override"] = nc.Name
 	}

 	pauseImage := images.Pause(k8s.ImageRepository)
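With the new signature, NewKubeletConfig receives the whole MachineConfig plus the node being configured; node-ip defaults to the primary control plane's IP and hostname-override to the node name, unless an extra option already set them. A minimal sketch of that precedence logic, assuming simplified stand-in types (primaryControlPlane and kubeletFlags below are hypothetical local helpers, not minikube's real config.PrimaryControlPlane or kubelet unit generator):

```go
package main

import (
	"errors"
	"fmt"
	"sort"
	"strings"
)

type Node struct {
	IP           string
	Name         string
	ControlPlane bool
}

type MachineConfig struct{ Nodes []Node }

// primaryControlPlane is a simplified stand-in for config.PrimaryControlPlane:
// it returns the first node marked as a control plane.
func primaryControlPlane(mc MachineConfig) (Node, error) {
	for _, n := range mc.Nodes {
		if n.ControlPlane {
			return n, nil
		}
	}
	return Node{}, errors.New("no control-plane node found")
}

// kubeletFlags sketches how node-ip and hostname-override are filled in:
// explicit extra options win, otherwise the control-plane IP and node name are used.
func kubeletFlags(mc MachineConfig, nc Node, extraOpts map[string]string) (string, error) {
	cp, err := primaryControlPlane(mc)
	if err != nil {
		return "", err
	}
	if _, ok := extraOpts["node-ip"]; !ok {
		extraOpts["node-ip"] = cp.IP
	}
	if nc.Name != "" {
		extraOpts["hostname-override"] = nc.Name
	}
	keys := make([]string, 0, len(extraOpts))
	for k := range extraOpts {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	var b strings.Builder
	for _, k := range keys {
		fmt.Fprintf(&b, " --%s=%s", k, extraOpts[k])
	}
	return strings.TrimSpace(b.String()), nil
}

func main() {
	mc := MachineConfig{Nodes: []Node{{IP: "192.168.1.100", Name: "minikube", ControlPlane: true}}}
	flags, err := kubeletFlags(mc, mc.Nodes[0], map[string]string{})
	if err != nil {
		panic(err)
	}
	fmt.Println(flags) // --hostname-override=minikube --node-ip=192.168.1.100
}
```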
@@ -30,17 +30,24 @@ import (
 func TestGenerateKubeletConfig(t *testing.T) {
 	tests := []struct {
 		description string
-		cfg         config.KubernetesConfig
+		cfg         config.MachineConfig
 		expected    string
 		shouldErr   bool
 	}{
 		{
 			description: "old docker",
-			cfg: config.KubernetesConfig{
-				NodeIP:            "192.168.1.100",
-				KubernetesVersion: constants.OldestKubernetesVersion,
-				NodeName:          "minikube",
-				ContainerRuntime:  "docker",
+			cfg: config.MachineConfig{
+				KubernetesConfig: config.KubernetesConfig{
+					KubernetesVersion: constants.OldestKubernetesVersion,
+					ContainerRuntime:  "docker",
+				},
+				Nodes: []config.Node{
+					{
+						IP:           "192.168.1.100",
+						Name:         "minikube",
+						ControlPlane: true,
+					},
+				},
 			},
 			expected: `[Unit]
 Wants=docker.socket
@@ -54,52 +61,41 @@ ExecStart=/var/lib/minikube/binaries/v1.11.10/kubelet --allow-privileged=true --
 		},
 		{
 			description: "newest cri runtime",
-			cfg: config.KubernetesConfig{
-				NodeIP:            "192.168.1.100",
-				KubernetesVersion: constants.NewestKubernetesVersion,
-				NodeName:          "minikube",
-				ContainerRuntime:  "cri-o",
+			cfg: config.MachineConfig{
+				KubernetesConfig: config.KubernetesConfig{
+					KubernetesVersion: constants.NewestKubernetesVersion,
+					ContainerRuntime:  "cri-o",
+				},
+				Nodes: []config.Node{
+					{
+						IP:           "192.168.1.100",
+						Name:         "minikube",
+						ControlPlane: true,
+					},
+				},
 			},
 			expected: `[Unit]
 Wants=crio.service

 [Service]
 ExecStart=
-ExecStart=/var/lib/minikube/binaries/v1.17.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
+ExecStart=/var/lib/minikube/binaries/v1.17.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m

 [Install]
 `,
 		},
 		{
 			description: "default containerd runtime",
-			cfg: config.KubernetesConfig{
-				NodeIP:            "192.168.1.100",
-				KubernetesVersion: constants.DefaultKubernetesVersion,
-				NodeName:          "minikube",
-				ContainerRuntime:  "containerd",
-			},
-			expected: `[Unit]
-Wants=containerd.service
-
-[Service]
-ExecStart=
-ExecStart=/var/lib/minikube/binaries/v1.17.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
-
-[Install]
-`,
-		},
-		{
-			description: "default containerd runtime",
-			cfg: config.KubernetesConfig{
-				NodeIP:            "192.168.1.100",
-				KubernetesVersion: constants.DefaultKubernetesVersion,
-				NodeName:          "minikube",
-				ContainerRuntime:  "containerd",
-				ExtraOptions: config.ExtraOptionSlice{
-					config.ExtraOption{
-						Component: Kubelet,
-						Key:       "node-ip",
-						Value:     "192.168.1.200",
-					},
-				},
-			},
+			cfg: config.MachineConfig{
+				KubernetesConfig: config.KubernetesConfig{
+					KubernetesVersion: constants.DefaultKubernetesVersion,
+					ContainerRuntime:  "containerd",
+				},
+				Nodes: []config.Node{
+					{
+						IP:           "192.168.1.100",
+						Name:         "minikube",
+						ControlPlane: true,
+					},
+				},
+			},
@@ -108,26 +104,65 @@ Wants=containerd.service

 [Service]
 ExecStart=
-ExecStart=/var/lib/minikube/binaries/v1.17.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
+ExecStart=/var/lib/minikube/binaries/v1.17.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m

+[Install]
+`,
+		},
+		{
+			description: "default containerd runtime with IP override",
+			cfg: config.MachineConfig{
+				KubernetesConfig: config.KubernetesConfig{
+					KubernetesVersion: constants.DefaultKubernetesVersion,
+					ContainerRuntime:  "containerd",
+					ExtraOptions: config.ExtraOptionSlice{
+						config.ExtraOption{
+							Component: Kubelet,
+							Key:       "node-ip",
+							Value:     "192.168.1.200",
+						},
+					},
+				},
+				Nodes: []config.Node{
+					{
+						IP:           "192.168.1.100",
+						Name:         "minikube",
+						ControlPlane: true,
+					},
+				},
+			},
+			expected: `[Unit]
+Wants=containerd.service
+
+[Service]
+ExecStart=
+ExecStart=/var/lib/minikube/binaries/v1.17.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
+
 [Install]
 `,
 		},
 		{
 			description: "docker with custom image repository",
-			cfg: config.KubernetesConfig{
-				NodeIP:            "192.168.1.100",
-				KubernetesVersion: constants.DefaultKubernetesVersion,
-				NodeName:          "minikube",
-				ContainerRuntime:  "docker",
-				ImageRepository:   "docker-proxy-image.io/google_containers",
+			cfg: config.MachineConfig{
+				KubernetesConfig: config.KubernetesConfig{
+					KubernetesVersion: constants.DefaultKubernetesVersion,
+					ContainerRuntime:  "docker",
+					ImageRepository:   "docker-proxy-image.io/google_containers",
+				},
+				Nodes: []config.Node{
+					{
+						IP:           "192.168.1.100",
+						Name:         "minikube",
+						ControlPlane: true,
+					},
+				},
 			},
 			expected: `[Unit]
 Wants=docker.socket

 [Service]
 ExecStart=
-ExecStart=/var/lib/minikube/binaries/v1.17.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-infra-container-image=docker-proxy-image.io/google_containers/pause:3.1 --pod-manifest-path=/etc/kubernetes/manifests
+ExecStart=/var/lib/minikube/binaries/v1.17.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-infra-container-image=docker-proxy-image.io/google_containers/pause:3.1 --pod-manifest-path=/etc/kubernetes/manifests

 [Install]
 `,
@@ -136,13 +171,13 @@ ExecStart=/var/lib/minikube/binaries/v1.17.0/kubelet --authorization-mode=Webhoo

 	for _, tc := range tests {
 		t.Run(tc.description, func(t *testing.T) {
-			runtime, err := cruntime.New(cruntime.Config{Type: tc.cfg.ContainerRuntime,
+			runtime, err := cruntime.New(cruntime.Config{Type: tc.cfg.KubernetesConfig.ContainerRuntime,
 				Runner: command.NewFakeCommandRunner()})
 			if err != nil {
 				t.Fatalf("runtime: %v", err)
 			}

-			got, err := NewKubeletConfig(tc.cfg, runtime)
+			got, err := NewKubeletConfig(tc.cfg, tc.cfg.Nodes[0], runtime)
 			if err != nil && !tc.shouldErr {
 				t.Errorf("got unexpected error generating config: %v", err)
 				return
@@ -23,6 +23,9 @@ import (
 	"net"
 	"net/http"
 	"os/exec"
+	"path"
+	"strconv"
+	"strings"
 	"time"

 	"github.com/docker/machine/libmachine/state"
@@ -37,13 +40,12 @@ import (
 // APIServerProcess waits for api server to be healthy returns error if it doesn't
 func APIServerProcess(runner command.Runner, start time.Time, timeout time.Duration) error {
 	glog.Infof("waiting for apiserver process to appear ...")
-	err := wait.PollImmediate(time.Second*1, timeout, func() (bool, error) {
+	err := wait.PollImmediate(time.Millisecond*500, timeout, func() (bool, error) {
 		if time.Since(start) > timeout {
 			return false, fmt.Errorf("cluster wait timed out during process check")
 		}
-		rr, ierr := runner.RunCmd(exec.Command("sudo", "pgrep", "kube-apiserver"))
-		if ierr != nil {
-			glog.Warningf("pgrep apiserver: %v cmd: %s", ierr, rr.Command())
+		if _, ierr := apiServerPID(runner); ierr != nil {
 			return false, nil
 		}
 		return true, nil
@@ -55,9 +57,19 @@ func APIServerProcess(runner command.Runner, start time.Time, timeout time.Durat
 	return nil
 }

+// apiServerPID returns our best guess to the apiserver pid
+func apiServerPID(cr command.Runner) (int, error) {
+	rr, err := cr.RunCmd(exec.Command("sudo", "pgrep", "-xnf", "kube-apiserver.*minikube.*"))
+	if err != nil {
+		return 0, err
+	}
+	s := strings.TrimSpace(rr.Stdout.String())
+	return strconv.Atoi(s)
+}
+
 // SystemPods verifies essential pods for running kurnetes is running
-func SystemPods(client *kubernetes.Clientset, start time.Time, ip string, port int, timeout time.Duration) error {
-	glog.Infof("waiting for kube-system pods to appear %s...", net.JoinHostPort(ip, fmt.Sprint(port)))
+func SystemPods(client *kubernetes.Clientset, start time.Time, timeout time.Duration) error {
+	glog.Info("waiting for kube-system pods to appear ...")
 	pStart := time.Now()
 	podStart := time.Time{}
 	podList := func() (bool, error) {
@@ -101,12 +113,12 @@ func APIServerIsRunning(start time.Time, ip string, port int, timeout time.Durat
 			return false, fmt.Errorf("cluster wait timed out during healthz check")
 		}

-		status, err := APIServerStatus(net.ParseIP(ip), port)
+		status, err := apiServerHealthz(net.ParseIP(ip), port)
 		if err != nil {
 			glog.Warningf("status: %v", err)
 			return false, nil
 		}
-		if status != "Running" {
+		if status != state.Running {
 			return false, nil
 		}
 		return true, nil
@@ -119,9 +131,49 @@ func APIServerIsRunning(start time.Time, ip string, port int, timeout time.Durat
 	return nil
 }

-// APIServerStatus hits the /healthz endpoint and returns libmachine style state.State
-func APIServerStatus(ip net.IP, apiserverPort int) (string, error) {
-	url := fmt.Sprintf("https://%s/healthz", net.JoinHostPort(ip.String(), fmt.Sprint(apiserverPort)))
+// APIServerStatus returns apiserver status in libmachine style state.State
+func APIServerStatus(cr command.Runner, ip net.IP, port int) (state.State, error) {
+	glog.Infof("Checking apiserver status ...")
+
+	pid, err := apiServerPID(cr)
+	if err != nil {
+		glog.Warningf("unable to get apiserver pid: %v", err)
+		return state.Stopped, nil
+	}
+
+	// Get the freezer cgroup entry for this pid
+	rr, err := cr.RunCmd(exec.Command("sudo", "egrep", "^[0-9]+:freezer:", fmt.Sprintf("/proc/%d/cgroup", pid)))
+	if err != nil {
+		glog.Warningf("unable to find freezer cgroup: %v", err)
+		return apiServerHealthz(ip, port)
+
+	}
+	freezer := strings.TrimSpace(rr.Stdout.String())
+	glog.Infof("apiserver freezer: %q", freezer)
+	fparts := strings.Split(freezer, ":")
+	if len(fparts) != 3 {
+		glog.Warningf("unable to parse freezer - found %d parts: %s", len(fparts), freezer)
+		return apiServerHealthz(ip, port)
+	}
+
+	rr, err = cr.RunCmd(exec.Command("sudo", "cat", path.Join("/sys/fs/cgroup/freezer", fparts[2], "freezer.state")))
+	if err != nil {
+		glog.Errorf("unable to get freezer state: %s", rr.Stderr.String())
+		return apiServerHealthz(ip, port)
+	}
+
+	fs := strings.TrimSpace(rr.Stdout.String())
+	glog.Infof("freezer state: %q", fs)
+	if fs == "FREEZING" || fs == "FROZEN" {
+		return state.Paused, nil
+	}
+	return apiServerHealthz(ip, port)
+}
+
+// apiServerHealthz hits the /healthz endpoint and returns libmachine style state.State
+func apiServerHealthz(ip net.IP, port int) (state.State, error) {
+	url := fmt.Sprintf("https://%s/healthz", net.JoinHostPort(ip.String(), fmt.Sprint(port)))
+	glog.Infof("Checking apiserver healthz at %s ...", url)
 	// To avoid: x509: certificate signed by unknown authority
 	tr := &http.Transport{
 		Proxy: nil, // To avoid connectiv issue if http(s)_proxy is set.
@@ -131,11 +183,31 @@ func APIServerStatus(ip net.IP, apiserverPort int) (string, error) {
 	resp, err := client.Get(url)
 	// Connection refused, usually.
 	if err != nil {
-		return state.Stopped.String(), nil
+		return state.Stopped, nil
 	}
 	if resp.StatusCode != http.StatusOK {
 		glog.Warningf("%s response: %v %+v", url, err, resp)
-		return state.Error.String(), nil
+		return state.Error, nil
 	}
-	return state.Running.String(), nil
+	return state.Running, nil
+}
+
+func KubeletStatus(cr command.Runner) (state.State, error) {
+	glog.Infof("Checking kubelet status ...")
+	rr, err := cr.RunCmd(exec.Command("sudo", "systemctl", "is-active", "kubelet"))
+	if err != nil {
+		// Do not return now, as we still have parsing to do!
+		glog.Warningf("%s returned error: %v", rr.Command(), err)
+	}
+	s := strings.TrimSpace(rr.Stdout.String())
+	glog.Infof("kubelet is-active: %s", s)
+	switch s {
+	case "active":
+		return state.Running, nil
+	case "inactive":
+		return state.Stopped, nil
+	case "activating":
+		return state.Starting, nil
+	}
+	return state.Error, nil
 }
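The kverify changes above derive apiserver and kubelet states from the freezer cgroup and from `systemctl is-active` output instead of raw strings. A self-contained sketch of those two mappings, using a plain string-based State stand-in rather than libmachine's state.State, and with the command execution and healthz probe replaced by plain inputs:

```go
package main

import (
	"fmt"
	"strings"
)

// State is a stand-in for libmachine's state.State, used only for this sketch.
type State string

const (
	Running  State = "Running"
	Stopped  State = "Stopped"
	Paused   State = "Paused"
	Starting State = "Starting"
	Error    State = "Error"
)

// kubeletState mirrors the systemctl is-active mapping added in KubeletStatus.
func kubeletState(isActiveOutput string) State {
	switch strings.TrimSpace(isActiveOutput) {
	case "active":
		return Running
	case "inactive":
		return Stopped
	case "activating":
		return Starting
	}
	return Error
}

// apiserverStateFromFreezer mirrors the freezer-cgroup check: a FROZEN or
// FREEZING freezer.state means the apiserver process is paused; anything else
// falls back to a healthz probe (represented here by the healthy flag).
func apiserverStateFromFreezer(freezerState string, healthy bool) State {
	fs := strings.TrimSpace(freezerState)
	if fs == "FREEZING" || fs == "FROZEN" {
		return Paused
	}
	if healthy {
		return Running
	}
	return Stopped
}

func main() {
	fmt.Println(kubeletState("active\n"))                  // Running
	fmt.Println(kubeletState("failed"))                    // Error
	fmt.Println(apiserverStateFromFreezer("THAWED", true)) // Running
	fmt.Println(apiserverStateFromFreezer("FROZEN", true)) // Paused
}
```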
The same pair of hunks below recurs in each of the kubeadm golden testdata files for a given Kubernetes version; only the hunk offsets differ per file, and the dns test fixtures use `dnsDomain: 1.1.1.1` in place of `cluster.local`.

@@ -30,6 +30,8 @@ dns:
 etcd:
   local:
     dataDir: /var/lib/minikube/etcd
+    extraArgs:
+      listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381
 kubernetesVersion: v1.14.0
 networking:
   dnsDomain: cluster.local
@@ -43,3 +45,7 @@ evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"
   imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -30,6 +30,8 @@ dns:
 etcd:
   local:
     dataDir: /var/lib/minikube/etcd
+    extraArgs:
+      listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381
 kubernetesVersion: v1.15.0
 networking:
   dnsDomain: cluster.local
@@ -43,3 +45,7 @@ evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"
   imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -30,6 +30,8 @@ dns:
 etcd:
   local:
     dataDir: /var/lib/minikube/etcd
+    extraArgs:
+      listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381
 kubernetesVersion: v1.16.0
 networking:
   dnsDomain: cluster.local
@@ -43,3 +45,7 @@ evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"
   imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -30,6 +30,8 @@ dns:
 etcd:
   local:
     dataDir: /var/lib/minikube/etcd
+    extraArgs:
+      listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381
 kubernetesVersion: v1.17.0
 networking:
   dnsDomain: cluster.local
@@ -43,3 +45,7 @@ evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"
   imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249