Merge remote-tracking branch 'upstream' into mytests
commit
12f2d69259
|
@ -0,0 +1,41 @@
|
|||
name: "generate-docs"
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
env:
|
||||
GOPROXY: https://proxy.golang.org
|
||||
GO_VERSION: 1.16.4
|
||||
jobs:
|
||||
generate-docs:
|
||||
runs-on: ubuntu-18.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: ${{env.GO_VERSION}}
|
||||
stable: true
|
||||
- name: Generate Docs
|
||||
id: gendocs
|
||||
run: |
|
||||
make generate-docs
|
||||
echo "::set-output name=changes::$(git status --porcelain)"
|
||||
- name: Create PR
|
||||
if: ${{ steps.gendocs.outputs.changes != '' }}
|
||||
uses: peter-evans/create-pull-request@v3
|
||||
with:
|
||||
token: ${{ secrets.MINIKUBE_BOT_PAT }}
|
||||
commit-message: Update auto-generated docs and translations
|
||||
committer: minikube-bot <minikube-bot@google.com>
|
||||
author: minikube-bot <minikube-bot@google.com>
|
||||
branch: gendocs
|
||||
push-to-fork: minikube-bot/minikube
|
||||
base: master
|
||||
delete-branch: true
|
||||
title: 'Update auto-generated docs and translations'
|
||||
body: |
|
||||
Committing changes resulting from `make generate-docs`.
|
||||
This PR is auto-generated by the [gendocs](https://github.com/kubernetes/minikube/blob/master/.github/workflows/docs.yml) CI workflow.
|
||||
```
|
||||
${{ steps.gendocs.outputs.changes }}
|
||||
```
|
|
@ -40,7 +40,7 @@ jobs:
|
|||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y libvirt-dev
|
||||
make cross e2e-cross debs
|
||||
MINIKUBE_BUILD_IN_DOCKER=y make cross e2e-cross debs
|
||||
cp -r test/integration/testdata ./out
|
||||
whoami
|
||||
echo github ref $GITHUB_REF
|
||||
|
|
|
@ -0,0 +1,21 @@
|
|||
name: "time-to-k8s benchmark"
|
||||
on:
|
||||
release:
|
||||
types: [released]
|
||||
env:
|
||||
GOPROXY: https://proxy.golang.org
|
||||
GO_VERSION: 1.16.4
|
||||
jobs:
|
||||
benchmark:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Checkout submodules
|
||||
run: git submodule update --init
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: ${{env.GO_VERSION}}
|
||||
stable: true
|
||||
- name: Benchmark
|
||||
run: |
|
||||
./hack/benchmark/time-to-k8s/time-to-k8s.sh ${{ secrets.MINIKUBE_BOT_PAT }}
|
|
@ -0,0 +1,28 @@
|
|||
name: Translations Validation
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- "translations/**"
|
||||
env:
|
||||
GOPROXY: https://proxy.golang.org
|
||||
GO_VERSION: 1.16.4
|
||||
jobs:
|
||||
unit_test:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: ${{env.GO_VERSION}}
|
||||
stable: true
|
||||
- name: Install libvirt
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y libvirt-dev
|
||||
- name: Download Dependencies
|
||||
run: go mod download
|
||||
- name: Unit Test
|
||||
env:
|
||||
TESTSUITE: unittest
|
||||
run: make test
|
||||
continue-on-error: false
|
|
@ -0,0 +1,18 @@
|
|||
name: "Tweet the release"
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*'
|
||||
release:
|
||||
types: [published]
|
||||
jobs:
|
||||
twitter-release:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: ethomson/send-tweet-action@v1
|
||||
with:
|
||||
status: "A new minikube version just released ! check it out https://github.com/kubernetes/minikube/blob/master/CHANGELOG.md"
|
||||
consumer-key: ${{ secrets.TWITTER_API_KEY }}
|
||||
consumer-secret: ${{ secrets.TWITTER_API_SECRET }}
|
||||
access-token: ${{ secrets.TWITTER_ACCESS_TOKEN }}
|
||||
access-token-secret: ${{ secrets.TWITTER_ACCESS_TOKEN_SECRET }}
|
|
@ -0,0 +1,42 @@
|
|||
name: "update-kubernetes-versions"
|
||||
on:
|
||||
schedule:
|
||||
# every Monday at around 1 am pacific/8 am UTC
|
||||
- cron: "0 8 * * 1"
|
||||
env:
|
||||
GOPROXY: https://proxy.golang.org
|
||||
GO_VERSION: 1.16.4
|
||||
jobs:
|
||||
bump-k8s-versions:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: ${{env.GO_VERSION}}
|
||||
stable: true
|
||||
- name: Bump Kubernetes Versions
|
||||
id: bumpk8s
|
||||
run: |
|
||||
make update-kubernetes-version
|
||||
echo "::set-output name=changes::$(git status --porcelain)"
|
||||
- name: Create PR
|
||||
if: ${{ steps.bumpk8s.outputs.changes != '' }}
|
||||
uses: peter-evans/create-pull-request@v3
|
||||
with:
|
||||
token: ${{ secrets.MINIKUBE_BOT_PAT }}
|
||||
commit-message: bump default/newest kubernetes versions
|
||||
committer: minikube-bot <minikube-bot@google.com>
|
||||
author: minikube-bot <minikube-bot@google.com>
|
||||
branch: auto_bump_k8s_versions
|
||||
push-to-fork: minikube-bot/minikube
|
||||
base: master
|
||||
delete-branch: true
|
||||
title: 'bump default/newest kubernetes versions'
|
||||
labels: ok-to-test
|
||||
body: |
|
||||
This PR was auto-generated by `make update-kubernetes-version` using [update-k8s-versions.yml](https://github.com/kubernetes/minikube/tree/master/.github/workflows) CI Workflow.
|
||||
Please only merge if all the tests pass.
|
||||
|
||||
${{ steps.bumpk8s.outputs.changes }}
|
||||
|
|
@ -35,10 +35,6 @@ _testmain.go
|
|||
#iso version file
|
||||
deploy/iso/minikube-iso/board/coreos/minikube/rootfs-overlay/etc/VERSION
|
||||
|
||||
/pkg/minikube/assets/assets.go-e
|
||||
/pkg/minikube/assets/assets.go
|
||||
/pkg/minikube/translate/translations.go
|
||||
/pkg/minikube/translate/translations.go-e
|
||||
/minikube
|
||||
|
||||
.DS_Store
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[submodule "site/themes/docsy"]
|
||||
path = site/themes/docsy
|
||||
url = https://github.com/google/docsy.git
|
||||
[submodule "hack/benchmark/time-to-k8s/time-to-k8s"]
|
||||
path = hack/benchmark/time-to-k8s/time-to-k8s
|
||||
[submodule "hack/benchmark/time-to-k8s/time-to-k8s-repo"]
|
||||
path = hack/benchmark/time-to-k8s/time-to-k8s-repo
|
||||
url = https://github.com/tstromberg/time-to-k8s.git
|
||||
|
|
96
CHANGELOG.md
96
CHANGELOG.md
|
@ -1,5 +1,101 @@
|
|||
# Release Notes
|
||||
|
||||
## Version 1.22.0-beta.0 - 2021-06-28
|
||||
|
||||
Features:
|
||||
|
||||
* auto-pause addon: add support for arm64 [#11743](https://github.com/kubernetes/minikube/pull/11743)
|
||||
* `addon list`: add info on each addon's maintainer [#11753](https://github.com/kubernetes/minikube/pull/11753)
|
||||
* add ability to pass max to `--cpu` and `--memory` flags [#11692](https://github.com/kubernetes/minikube/pull/11692)
|
||||
|
||||
Bugs:
|
||||
|
||||
* Fix `--base-image` caching for images specified by name:tag [#11603](https://github.com/kubernetes/minikube/pull/11603)
|
||||
* Fix embed-certs global config [#11576](https://github.com/kubernetes/minikube/pull/11576)
|
||||
* Fix a download link to use arm64 instead of amd64 [#11653](https://github.com/kubernetes/minikube/pull/11653)
|
||||
* fix downloading duplicate base image [#11690](https://github.com/kubernetes/minikube/pull/11690)
|
||||
* fix multi-node losing track of nodes after second restart [#11731](https://github.com/kubernetes/minikube/pull/11731)
|
||||
* gcp-auth: do not override existing environment variables in pods [#11665](https://github.com/kubernetes/minikube/pull/11665)
|
||||
|
||||
Minor improvements:
|
||||
|
||||
* Allow running amd64 binary on M1 [#11674](https://github.com/kubernetes/minikube/pull/11674)
|
||||
* improve containerd experience on cgroup v2 [#11632](https://github.com/kubernetes/minikube/pull/11632)
|
||||
* Improve French locale [#11728](https://github.com/kubernetes/minikube/pull/11728)
|
||||
* Fix UI error for stopping systemd service [#11667](https://github.com/kubernetes/minikube/pull/11667)
|
||||
* international languages: allow using LC_ALL env to set local language for windows [#11721](https://github.com/kubernetes/minikube/pull/11721)
|
||||
* Change registery_mirror to registery-mirror [#11678](https://github.com/kubernetes/minikube/pull/11678)
|
||||
|
||||
Version Upgrades:
|
||||
|
||||
* ISO: Upgrade podman to 3.1.2 [#11704](https://github.com/kubernetes/minikube/pull/11704)
|
||||
* Upgrade Buildroot to 2021.02 LTS with Linux 4.19 [#11688](https://github.com/kubernetes/minikube/pull/11688)
|
||||
|
||||
For a more detailed changelog, including changes occurring in pre-release versions, see [CHANGELOG.md](https://github.com/kubernetes/minikube/blob/master/CHANGELOG.md).
|
||||
|
||||
Thank you to our contributors for this release!
|
||||
|
||||
- Anders F Björklund
|
||||
- Andriy Dzikh
|
||||
- Daehyeok Mun
|
||||
- Dongjoon Hyun
|
||||
- Felipe Crescencio de Oliveira
|
||||
- Ilya Zuyev
|
||||
- JacekDuszenko
|
||||
- Jeff MAURY
|
||||
- Medya Ghazizadeh
|
||||
- Peixuan Ding
|
||||
- RA489
|
||||
- Sharif Elgamal
|
||||
- Steven Powell
|
||||
- Vishal Jain
|
||||
- zhangdb-git
|
||||
|
||||
Thank you to our PR reviewers for this release!
|
||||
|
||||
- medyagh (63 comments)
|
||||
- sharifelgamal (9 comments)
|
||||
- ilya-zuyev (6 comments)
|
||||
- andriyDev (3 comments)
|
||||
- spowelljr (3 comments)
|
||||
- afbjorklund (1 comments)
|
||||
- prezha (1 comments)
|
||||
- tharun208 (1 comments)
|
||||
|
||||
Thank you to our triage members for this release!
|
||||
|
||||
## Version 1.21.0 - 2021-06-10
|
||||
* add more polish translations [#11587](https://github.com/kubernetes/minikube/pull/11587)
|
||||
* Modify MetricsServer to use v1 api version (instead of v1beta1). [#11584](https://github.com/kubernetes/minikube/pull/11584)
|
||||
|
||||
For a more detailed changelog, including changes occurring in pre-release versions, see [CHANGELOG.md](https://github.com/kubernetes/minikube/blob/master/CHANGELOG.md).
|
||||
|
||||
Thank you to our contributors for this release!
|
||||
|
||||
- Andriy Dzikh
|
||||
- Ilya Zuyev
|
||||
- JacekDuszenko
|
||||
- Medya Ghazizadeh
|
||||
- Sharif Elgamal
|
||||
- Steven Powell
|
||||
|
||||
Thank you to our PR reviewers for this release!
|
||||
|
||||
- spowelljr (11 comments)
|
||||
- medyagh (2 comments)
|
||||
- sharifelgamal (2 comments)
|
||||
- andriyDev (1 comments)
|
||||
|
||||
Thank you to our triage members for this release!
|
||||
|
||||
- RA489 (12 comments)
|
||||
- andriyDev (10 comments)
|
||||
- sharifelgamal (10 comments)
|
||||
- JacekDuszenko (7 comments)
|
||||
- spowelljr (5 comments)
|
||||
|
||||
Check out our [contributions leaderboard](https://minikube.sigs.k8s.io/docs/contrib/leaderboard/v1.21.0/) for this release!
|
||||
|
||||
## Version 1.21.0-beta.0 - 2021-06-02
|
||||
Features:
|
||||
* Support setting addons from environmental variables [#11469](https://github.com/kubernetes/minikube/pull/11469)
|
||||
|
|
246
Makefile
246
Makefile
|
@ -14,7 +14,7 @@
|
|||
|
||||
# Bump these on release - and please check ISO_VERSION for correctness.
|
||||
VERSION_MAJOR ?= 1
|
||||
VERSION_MINOR ?= 21
|
||||
VERSION_MINOR ?= 22
|
||||
VERSION_BUILD ?= 0-beta.0
|
||||
RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)
|
||||
VERSION ?= v$(RAW_VERSION)
|
||||
|
@ -23,7 +23,7 @@ KUBERNETES_VERSION ?= $(shell egrep "DefaultKubernetesVersion =" pkg/minikube/co
|
|||
KIC_VERSION ?= $(shell egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f2)
|
||||
|
||||
# Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions
|
||||
ISO_VERSION ?= v1.20.0
|
||||
ISO_VERSION ?= v1.22.0
|
||||
# Dashes are valid in semver, but not Linux packaging. Use ~ to delimit alpha/beta
|
||||
DEB_VERSION ?= $(subst -,~,$(RAW_VERSION))
|
||||
DEB_REVISION ?= 0
|
||||
|
@ -40,7 +40,7 @@ KVM_GO_VERSION ?= $(GO_VERSION:.0=)
|
|||
|
||||
INSTALL_SIZE ?= $(shell du out/minikube-windows-amd64.exe | cut -f1)
|
||||
BUILDROOT_BRANCH ?= 2020.02.12
|
||||
REGISTRY?=gcr.io/k8s-minikube
|
||||
REGISTRY ?= gcr.io/k8s-minikube
|
||||
|
||||
# Get git commit id
|
||||
COMMIT_NO := $(shell git rev-parse HEAD 2> /dev/null || true)
|
||||
|
@ -54,7 +54,9 @@ HYPERKIT_BUILD_IMAGE ?= neilotoole/xcgo:go1.15
|
|||
BUILD_IMAGE ?= us.gcr.io/k8s-artifacts-prod/build-image/kube-cross:v$(GO_VERSION)-1
|
||||
|
||||
ISO_BUILD_IMAGE ?= $(REGISTRY)/buildroot-image
|
||||
KVM_BUILD_IMAGE ?= $(REGISTRY)/kvm-build-image:$(KVM_GO_VERSION)
|
||||
|
||||
KVM_BUILD_IMAGE_AMD64 ?= $(REGISTRY)/kvm-build-image_amd64:$(KVM_GO_VERSION)
|
||||
KVM_BUILD_IMAGE_ARM64 ?= $(REGISTRY)/kvm-build-image_arm64:$(KVM_GO_VERSION)
|
||||
|
||||
ISO_BUCKET ?= minikube/iso
|
||||
|
||||
|
@ -74,8 +76,7 @@ GOLINT_GOGC ?= 100
|
|||
GOLINT_OPTIONS = --timeout 7m \
|
||||
--build-tags "${MINIKUBE_INTEGRATION_BUILD_TAGS}" \
|
||||
--enable gofmt,goimports,gocritic,golint,gocyclo,misspell,nakedret,stylecheck,unconvert,unparam,dogsled \
|
||||
--exclude 'variable on range scope.*in function literal|ifElseChain' \
|
||||
--skip-files "pkg/minikube/translate/translations.go|pkg/minikube/assets/assets.go"
|
||||
--exclude 'variable on range scope.*in function literal|ifElseChain'
|
||||
|
||||
export GO111MODULE := on
|
||||
|
||||
|
@ -130,13 +131,15 @@ MINIKUBE_MARKDOWN_FILES := README.md CONTRIBUTING.md CHANGELOG.md
|
|||
MINIKUBE_BUILD_TAGS :=
|
||||
MINIKUBE_INTEGRATION_BUILD_TAGS := integration $(MINIKUBE_BUILD_TAGS)
|
||||
|
||||
CMD_SOURCE_DIRS = cmd pkg
|
||||
CMD_SOURCE_DIRS = cmd pkg deploy/addons translations
|
||||
SOURCE_DIRS = $(CMD_SOURCE_DIRS) test
|
||||
SOURCE_PACKAGES = ./cmd/... ./pkg/... ./test/...
|
||||
SOURCE_PACKAGES = ./cmd/... ./pkg/... ./deploy/addons/... ./translations/... ./test/...
|
||||
|
||||
SOURCE_GENERATED = pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go
|
||||
SOURCE_FILES = $(shell find $(CMD_SOURCE_DIRS) -type f -name "*.go" | grep -v _test.go)
|
||||
GOTEST_FILES = $(shell find $(CMD_SOURCE_DIRS) -type f -name "*.go" | grep _test.go)
|
||||
ADDON_FILES = $(shell find "deploy/addons" -type f | grep -v "\.go")
|
||||
TRANSLATION_FILES = $(shell find "translations" -type f | grep -v "\.go")
|
||||
ASSET_FILES = $(ADDON_FILES) $(TRANSLATION_FILES)
|
||||
|
||||
# kvm2 ldflags
|
||||
KVM2_LDFLAGS := -X k8s.io/minikube/pkg/drivers/kvm.version=$(VERSION) -X k8s.io/minikube/pkg/drivers/kvm.gitCommitID=$(COMMIT)
|
||||
|
@ -195,7 +198,7 @@ ifneq ($(TEST_FILES),)
|
|||
INTEGRATION_TESTS_TO_RUN := $(addprefix ./test/integration/, $(TEST_HELPERS) $(TEST_FILES))
|
||||
endif
|
||||
|
||||
out/minikube$(IS_EXE): $(SOURCE_GENERATED) $(SOURCE_FILES) go.mod
|
||||
out/minikube$(IS_EXE): $(SOURCE_FILES) $(ASSET_FILES) go.mod
|
||||
ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
|
||||
$(call DOCKER,$(BUILD_IMAGE),GOOS=$(GOOS) GOARCH=$(GOARCH) GOARM=$(GOARM) /usr/bin/make $@)
|
||||
else
|
||||
|
@ -244,7 +247,7 @@ minikube-windows-amd64.exe: out/minikube-windows-amd64.exe ## Build Minikube for
|
|||
|
||||
eq = $(and $(findstring x$(1),x$(2)),$(findstring x$(2),x$(1)))
|
||||
|
||||
out/minikube-%: $(SOURCE_GENERATED) $(SOURCE_FILES)
|
||||
out/minikube-%: $(SOURCE_FILES) $(ASSET_FILES)
|
||||
ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
|
||||
$(call DOCKER,$(BUILD_IMAGE),/usr/bin/make $@)
|
||||
else
|
||||
|
@ -253,7 +256,7 @@ else
|
|||
go build -tags "$(MINIKUBE_BUILD_TAGS)" -ldflags="$(MINIKUBE_LDFLAGS)" -a -o $@ k8s.io/minikube/cmd/minikube
|
||||
endif
|
||||
|
||||
out/minikube-linux-armv6: $(SOURCE_GENERATED) $(SOURCE_FILES)
|
||||
out/minikube-linux-armv6: $(SOURCE_FILES) $(ASSET_FILES)
|
||||
$(Q)GOOS=linux GOARCH=arm GOARM=6 \
|
||||
go build -tags "$(MINIKUBE_BUILD_TAGS)" -ldflags="$(MINIKUBE_LDFLAGS)" -a -o $@ k8s.io/minikube/cmd/minikube
|
||||
|
||||
|
@ -310,18 +313,23 @@ iso_in_docker:
|
|||
--user $(shell id -u):$(shell id -g) --env HOME=/tmp --env IN_DOCKER=1 \
|
||||
$(ISO_BUILD_IMAGE) /bin/bash
|
||||
|
||||
test-iso: $(SOURCE_GENERATED)
|
||||
test-iso:
|
||||
go test -v $(INTEGRATION_TESTS_TO_RUN) --tags=iso --minikube-start-args="--iso-url=file://$(shell pwd)/out/buildroot/output/images/rootfs.iso9660"
|
||||
|
||||
.PHONY: test-pkg
|
||||
test-pkg/%: $(SOURCE_GENERATED) ## Trigger packaging test
|
||||
test-pkg/%: ## Trigger packaging test
|
||||
go test -v -test.timeout=60m ./$* --tags="$(MINIKUBE_BUILD_TAGS)"
|
||||
|
||||
.PHONY: all
|
||||
all: cross drivers e2e-cross cross-tars exotic retro out/gvisor-addon ## Build all different minikube components
|
||||
|
||||
.PHONY: drivers
|
||||
drivers: docker-machine-driver-hyperkit docker-machine-driver-kvm2 ## Build Hyperkit and KVM2 drivers
|
||||
drivers: ## Build Hyperkit and KVM2 drivers
|
||||
drivers: docker-machine-driver-hyperkit \
|
||||
docker-machine-driver-kvm2 \
|
||||
out/docker-machine-driver-kvm2-amd64 \
|
||||
out/docker-machine-driver-kvm2-arm64
|
||||
|
||||
|
||||
.PHONY: docker-machine-driver-hyperkit
|
||||
docker-machine-driver-hyperkit: out/docker-machine-driver-hyperkit ## Build Hyperkit driver
|
||||
|
@ -364,7 +372,7 @@ else
|
|||
endif
|
||||
|
||||
.PHONY: test
|
||||
test: $(SOURCE_GENERATED) ## Trigger minikube test
|
||||
test: ## Trigger minikube test
|
||||
MINIKUBE_LDFLAGS="${MINIKUBE_LDFLAGS}" ./test.sh
|
||||
|
||||
.PHONY: generate-docs
|
||||
|
@ -372,7 +380,7 @@ generate-docs: extract out/minikube ## Automatically generate commands documenta
|
|||
out/minikube generate-docs --path ./site/content/en/docs/commands/ --test-path ./site/content/en/docs/contrib/tests.en.md --code-path ./site/content/en/docs/contrib/errorcodes.en.md
|
||||
|
||||
.PHONY: gotest
|
||||
gotest: $(SOURCE_GENERATED) ## Trigger minikube test
|
||||
gotest: ## Trigger minikube test
|
||||
$(if $(quiet),@echo " TEST $@")
|
||||
$(Q)go test -tags "$(MINIKUBE_BUILD_TAGS)" -ldflags="$(MINIKUBE_LDFLAGS)" $(MINIKUBE_TEST_FILES)
|
||||
|
||||
|
@ -397,33 +405,6 @@ out/coverage.html: out/coverage.out
|
|||
extract: ## extract internationalization words for translations
|
||||
go run cmd/extract/extract.go
|
||||
|
||||
# Regenerates assets.go when template files have been updated
|
||||
pkg/minikube/assets/assets.go: $(shell find "deploy/addons" -type f)
|
||||
ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
|
||||
$(call DOCKER,$(BUILD_IMAGE),/usr/bin/make $@)
|
||||
endif
|
||||
@which go-bindata >/dev/null 2>&1 || GO111MODULE=off GOBIN="$(GOPATH)$(DIRSEP)bin" go get github.com/go-bindata/go-bindata/...
|
||||
$(if $(quiet),@echo " GEN $@")
|
||||
$(Q)PATH="$(PATH)$(PATHSEP)$(GOPATH)$(DIRSEP)bin" go-bindata -nomemcopy -o $@ -pkg assets deploy/addons/...
|
||||
$(Q)-gofmt -s -w $@
|
||||
@#golint: Dns should be DNS (compat sed)
|
||||
@sed -i -e 's/Dns/DNS/g' $@ && rm -f ./-e
|
||||
@#golint: Html should be HTML (compat sed)
|
||||
@sed -i -e 's/Html/HTML/g' $@ && rm -f ./-e
|
||||
@#golint: don't use underscores in Go names
|
||||
@sed -i -e 's/SnapshotStorageK8sIo_volumesnapshot/SnapshotStorageK8sIoVolumesnapshot/g' $@ && rm -f ./-e
|
||||
|
||||
pkg/minikube/translate/translations.go: $(shell find "translations/" -type f)
|
||||
ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
|
||||
$(call DOCKER,$(BUILD_IMAGE),/usr/bin/make $@)
|
||||
endif
|
||||
@which go-bindata >/dev/null 2>&1 || GO111MODULE=off GOBIN="$(GOPATH)$(DIRSEP)bin" go get github.com/go-bindata/go-bindata/...
|
||||
$(if $(quiet),@echo " GEN $@")
|
||||
$(Q)PATH="$(PATH)$(PATHSEP)$(GOPATH)$(DIRSEP)bin" go-bindata -nomemcopy -o $@ -pkg translate translations/...
|
||||
$(Q)-gofmt -s -w $@
|
||||
@#golint: Json should be JSON (compat sed)
|
||||
@sed -i -e 's/Json/JSON/' $@ && rm -f ./-e
|
||||
|
||||
.PHONY: cross
|
||||
cross: minikube-linux-amd64 minikube-darwin-amd64 minikube-windows-amd64.exe ## Build minikube for all platform
|
||||
|
||||
|
@ -450,7 +431,8 @@ checksum: ## Generate checksums
|
|||
for f in out/minikube.iso out/minikube-linux-amd64 out/minikube-linux-arm \
|
||||
out/minikube-linux-arm64 out/minikube-linux-ppc64le out/minikube-linux-s390x \
|
||||
out/minikube-darwin-amd64 out/minikube-windows-amd64.exe \
|
||||
out/docker-machine-driver-kvm2 out/docker-machine-driver-hyperkit; do \
|
||||
out/docker-machine-driver-kvm2 out/docker-machine-driver-kvm2-amd64 out/docker-machine-driver-kvm2-arm64 \
|
||||
out/docker-machine-driver-hyperkit; do \
|
||||
if [ -f "$${f}" ]; then \
|
||||
openssl sha256 "$${f}" | awk '{print $$2}' > "$${f}.sha256" ; \
|
||||
fi ; \
|
||||
|
@ -490,7 +472,7 @@ goimports: ## Run goimports and list the files differs from goimport's
|
|||
@test -z "`goimports -l $(SOURCE_DIRS)`"
|
||||
|
||||
.PHONY: golint
|
||||
golint: $(SOURCE_GENERATED) ## Run golint
|
||||
golint: ## Run golint
|
||||
@golint -set_exit_status $(SOURCE_PACKAGES)
|
||||
|
||||
.PHONY: gocyclo
|
||||
|
@ -505,17 +487,17 @@ out/linters/golangci-lint-$(GOLINT_VERSION):
|
|||
# this one is meant for local use
|
||||
.PHONY: lint
|
||||
ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
|
||||
lint: $(SOURCE_GENERATED)
|
||||
lint:
|
||||
docker run --rm -v $(pwd):/app -w /app golangci/golangci-lint:$(GOLINT_VERSION) \
|
||||
golangci-lint run ${GOLINT_OPTIONS} --skip-dirs "cmd/drivers/kvm|cmd/drivers/hyperkit|pkg/drivers/kvm|pkg/drivers/hyperkit" ./...
|
||||
else
|
||||
lint: $(SOURCE_GENERATED) out/linters/golangci-lint-$(GOLINT_VERSION) ## Run lint
|
||||
lint: out/linters/golangci-lint-$(GOLINT_VERSION) ## Run lint
|
||||
./out/linters/golangci-lint-$(GOLINT_VERSION) run ${GOLINT_OPTIONS} ./...
|
||||
endif
|
||||
|
||||
# lint-ci is slower version of lint and is meant to be used in ci (travis) to avoid out of memory leaks.
|
||||
.PHONY: lint-ci
|
||||
lint-ci: $(SOURCE_GENERATED) out/linters/golangci-lint-$(GOLINT_VERSION) ## Run lint-ci
|
||||
lint-ci: out/linters/golangci-lint-$(GOLINT_VERSION) ## Run lint-ci
|
||||
GOGC=${GOLINT_GOGC} ./out/linters/golangci-lint-$(GOLINT_VERSION) run \
|
||||
--concurrency ${GOLINT_JOBS} ${GOLINT_OPTIONS} ./...
|
||||
|
||||
|
@ -533,15 +515,15 @@ mdlint:
|
|||
verify-iso: # Make sure the current ISO exists in the expected bucket
|
||||
gsutil stat gs://$(ISO_BUCKET)/minikube-$(ISO_VERSION).iso
|
||||
|
||||
out/docs/minikube.md: $(shell find "cmd") $(shell find "pkg/minikube/constants") $(SOURCE_GENERATED)
|
||||
out/docs/minikube.md: $(shell find "cmd") $(shell find "pkg/minikube/constants")
|
||||
go run -ldflags="$(MINIKUBE_LDFLAGS)" -tags gendocs hack/help_text/gen_help_text.go
|
||||
|
||||
|
||||
.PHONY: debs ## Build all deb packages
|
||||
debs: out/minikube_$(DEB_VERSION)-$(DEB_REVISION)_amd64.deb \
|
||||
out/minikube_$(DEB_VERSION)-$(DEB_REVISION)_arm64.deb \
|
||||
out/docker-machine-driver-kvm2_$(DEB_VERSION).deb
|
||||
|
||||
out/docker-machine-driver-kvm2_$(DEB_VERSION).deb \
|
||||
out/docker-machine-driver-kvm2_$(DEB_VERSION)-$(DEB_REVISION)_amd64.deb \
|
||||
out/docker-machine-driver-kvm2_$(DEB_VERSION)-$(DEB_REVISION)_arm64.deb
|
||||
|
||||
.PHONY: deb_version
|
||||
deb_version:
|
||||
|
@ -662,8 +644,8 @@ release-hyperkit-driver: install-hyperkit-driver checksum ## Copy hyperkit using
|
|||
gsutil cp $(GOBIN)/docker-machine-driver-hyperkit.sha256 gs://minikube/drivers/hyperkit/$(VERSION)/
|
||||
|
||||
.PHONY: check-release
|
||||
check-release: $(SOURCE_GENERATED) ## Execute go test
|
||||
go test -v ./deploy/minikube/release_sanity_test.go -tags=release
|
||||
check-release: ## Execute go test
|
||||
go test -timeout 42m -v ./deploy/minikube/release_sanity_test.go
|
||||
|
||||
buildroot-image: $(ISO_BUILD_IMAGE) # convenient alias to build the docker container
|
||||
$(ISO_BUILD_IMAGE): deploy/iso/minikube-iso/Dockerfile
|
||||
|
@ -705,8 +687,23 @@ KICBASE_IMAGE_GCR ?= $(REGISTRY)/kicbase:$(KIC_VERSION)
|
|||
KICBASE_IMAGE_HUB ?= kicbase/stable:$(KIC_VERSION)
|
||||
KICBASE_IMAGE_REGISTRIES ?= $(KICBASE_IMAGE_GCR) $(KICBASE_IMAGE_HUB)
|
||||
|
||||
.PHONY: local-kicbase
|
||||
local-kicbase: ## Builds the kicbase image and tags it local/kicbase:latest and local/kicbase:$(KIC_VERSION)-$(COMMIT_SHORT)
|
||||
docker build -f ./deploy/kicbase/Dockerfile -t local/kicbase:$(KIC_VERSION) --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) --cache-from $(KICBASE_IMAGE_GCR) .
|
||||
docker tag local/kicbase:$(KIC_VERSION) local/kicbase:latest
|
||||
docker tag local/kicbase:$(KIC_VERSION) local/kicbase:$(KIC_VERSION)-$(COMMIT_SHORT)
|
||||
|
||||
SED = sed -i
|
||||
ifeq ($(GOOS),darwin)
|
||||
SED = sed -i ''
|
||||
endif
|
||||
|
||||
.PHONY: local-kicbase-debug
|
||||
local-kicbase-debug: local-kicbase ## Builds a local kicbase image and switches source code to point to it
|
||||
$(SED) 's|Version = .*|Version = \"$(KIC_VERSION)-$(COMMIT_SHORT)\"|;s|baseImageSHA = .*|baseImageSHA = \"\"|;s|gcrRepo = .*|gcrRepo = \"local/kicbase\"|;s|dockerhubRepo = .*|dockerhubRepo = \"local/kicbase\"|' pkg/drivers/kic/types.go
|
||||
|
||||
.PHONY: push-kic-base-image
|
||||
push-kic-base-image: deploy/kicbase/auto-pause docker-multi-arch-builder ## Push multi-arch local/kicbase:latest to all remote registries
|
||||
push-kic-base-image: docker-multi-arch-builder ## Push multi-arch local/kicbase:latest to all remote registries
|
||||
ifdef AUTOPUSH
|
||||
docker login gcr.io/k8s-minikube
|
||||
docker login docker.pkg.github.com
|
||||
|
@ -717,7 +714,7 @@ endif
|
|||
ifndef CIBUILD
|
||||
$(call user_confirm, 'Are you sure you want to push $(KICBASE_IMAGE_REGISTRIES) ?')
|
||||
endif
|
||||
env $(X_BUILD_ENV) docker buildx build --builder $(X_DOCKER_BUILDER) --platform $(KICBASE_ARCH) $(addprefix -t ,$(KICBASE_IMAGE_REGISTRIES)) --push --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) ./deploy/kicbase
|
||||
env $(X_BUILD_ENV) docker buildx build -f ./deploy/kicbase/Dockerfile --builder $(X_DOCKER_BUILDER) --platform $(KICBASE_ARCH) $(addprefix -t ,$(KICBASE_IMAGE_REGISTRIES)) --push --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) .
|
||||
|
||||
out/preload-tool:
|
||||
go build -ldflags="$(MINIKUBE_LDFLAGS)" -o $@ ./hack/preload-images/*.go
|
||||
|
@ -753,7 +750,7 @@ endif
|
|||
docker push $(IMAGE)
|
||||
|
||||
.PHONY: out/gvisor-addon
|
||||
out/gvisor-addon: $(SOURCE_GENERATED) ## Build gvisor addon
|
||||
out/gvisor-addon: ## Build gvisor addon
|
||||
$(if $(quiet),@echo " GO $@")
|
||||
$(Q)GOOS=linux CGO_ENABLED=0 go build -o $@ cmd/gvisor/gvisor.go
|
||||
|
||||
|
@ -796,10 +793,84 @@ out/docker-machine-driver-kvm2-aarch64: out/docker-machine-driver-kvm2-arm64
|
|||
$(if $(quiet),@echo " CP $@")
|
||||
$(Q)cp $< $@
|
||||
|
||||
|
||||
out/docker-machine-driver-kvm2_$(DEB_VERSION).deb: out/docker-machine-driver-kvm2_$(DEB_VERSION)-0_amd64.deb
|
||||
cp $< $@
|
||||
|
||||
out/docker-machine-driver-kvm2_$(DEB_VERSION)-$(DEB_REVISION)_arm64.deb: out/docker-machine-driver-kvm2_$(DEB_VERSION)-0_aarch64.deb
|
||||
cp $< $@
|
||||
|
||||
out/docker-machine-driver-kvm2_$(DEB_VERSION)-0_%.deb: out/docker-machine-driver-kvm2-%
|
||||
cp -r installers/linux/deb/kvm2_deb_template out/docker-machine-driver-kvm2_$(DEB_VERSION)
|
||||
chmod 0755 out/docker-machine-driver-kvm2_$(DEB_VERSION)/DEBIAN
|
||||
sed -E -i -e 's/--VERSION--/$(DEB_VERSION)/g' out/docker-machine-driver-kvm2_$(DEB_VERSION)/DEBIAN/control
|
||||
sed -E -i -e 's/--ARCH--/'$*'/g' out/docker-machine-driver-kvm2_$(DEB_VERSION)/DEBIAN/control
|
||||
mkdir -p out/docker-machine-driver-kvm2_$(DEB_VERSION)/usr/bin
|
||||
cp $< out/docker-machine-driver-kvm2_$(DEB_VERSION)/usr/bin/docker-machine-driver-kvm2
|
||||
fakeroot dpkg-deb --build out/docker-machine-driver-kvm2_$(DEB_VERSION) $@
|
||||
rm -rf out/docker-machine-driver-kvm2_$(DEB_VERSION)
|
||||
|
||||
out/docker-machine-driver-kvm2-$(RPM_VERSION).rpm: out/docker-machine-driver-kvm2-$(RPM_VERSION)-0.x86_64.rpm
|
||||
cp $< $@
|
||||
|
||||
out/docker-machine-driver-kvm2_$(RPM_VERSION).amd64.rpm: out/docker-machine-driver-kvm2-$(RPM_VERSION)-0.x86_64.rpm
|
||||
cp $< $@
|
||||
|
||||
out/docker-machine-driver-kvm2_$(RPM_VERSION).arm64.rpm: out/docker-machine-driver-kvm2-$(RPM_VERSION)-0.aarch64.rpm
|
||||
cp $< $@
|
||||
|
||||
out/docker-machine-driver-kvm2-$(RPM_VERSION)-0.%.rpm: out/docker-machine-driver-kvm2-%
|
||||
cp -r installers/linux/rpm/kvm2_rpm_template out/docker-machine-driver-kvm2-$(RPM_VERSION)
|
||||
sed -E -i -e 's/--VERSION--/'$(RPM_VERSION)'/g' out/docker-machine-driver-kvm2-$(RPM_VERSION)/docker-machine-driver-kvm2.spec
|
||||
sed -E -i -e 's|--OUT--|'$(PWD)/out'|g' out/docker-machine-driver-kvm2-$(RPM_VERSION)/docker-machine-driver-kvm2.spec
|
||||
rpmbuild -bb -D "_rpmdir $(PWD)/out" --target $* \
|
||||
out/docker-machine-driver-kvm2-$(RPM_VERSION)/docker-machine-driver-kvm2.spec
|
||||
@mv out/$*/docker-machine-driver-kvm2-$(RPM_VERSION)-0.$*.rpm out/ && rmdir out/$*
|
||||
rm -rf out/docker-machine-driver-kvm2-$(RPM_VERSION)
|
||||
|
||||
.PHONY: kvm-image-amd64
|
||||
kvm-image-amd64: installers/linux/kvm/Dockerfile.amd64 ## Convenient alias to build the docker container
|
||||
docker build --build-arg "GO_VERSION=$(KVM_GO_VERSION)" -t $(KVM_BUILD_IMAGE_AMD64) -f $< $(dir $<)
|
||||
@echo ""
|
||||
@echo "$(@) successfully built"
|
||||
|
||||
.PHONY: kvm-image-arm64
|
||||
kvm-image-arm64: installers/linux/kvm/Dockerfile.arm64 ## Convenient alias to build the docker container
|
||||
docker build --build-arg "GO_VERSION=$(KVM_GO_VERSION)" -t $(KVM_BUILD_IMAGE_ARM64) -f $< $(dir $<)
|
||||
@echo ""
|
||||
@echo "$(@) successfully built"
|
||||
|
||||
kvm_in_docker:
|
||||
docker image inspect -f '{{.Id}} {{.RepoTags}}' $(KVM_BUILD_IMAGE_AMD64) || $(MAKE) kvm-image-amd64
|
||||
rm -f out/docker-machine-driver-kvm2
|
||||
$(call DOCKER,$(KVM_BUILD_IMAGE_AMD64),/usr/bin/make out/docker-machine-driver-kvm2 COMMIT=$(COMMIT))
|
||||
|
||||
.PHONY: install-kvm-driver
|
||||
install-kvm-driver: out/docker-machine-driver-kvm2 ## Install KVM Driver
|
||||
mkdir -p $(GOBIN)
|
||||
cp out/docker-machine-driver-kvm2 $(GOBIN)/docker-machine-driver-kvm2
|
||||
|
||||
|
||||
out/docker-machine-driver-kvm2-arm64:
|
||||
ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
|
||||
docker image inspect -f '{{.Id}} {{.RepoTags}}' $(KVM_BUILD_IMAGE_ARM64) || $(MAKE) kvm-image-arm64
|
||||
$(call DOCKER,$(KVM_BUILD_IMAGE_ARM64),/usr/bin/make $@ COMMIT=$(COMMIT))
|
||||
else
|
||||
$(if $(quiet),@echo " GO $@")
|
||||
$(Q)GOARCH=arm64 \
|
||||
go build \
|
||||
-installsuffix "static" \
|
||||
-ldflags="$(KVM2_LDFLAGS)" \
|
||||
-tags "libvirt.1.3.1 without_lxc" \
|
||||
-o $@ \
|
||||
k8s.io/minikube/cmd/drivers/kvm
|
||||
endif
|
||||
chmod +X $@
|
||||
|
||||
out/docker-machine-driver-kvm2-%:
|
||||
ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
|
||||
docker image inspect -f '{{.Id}} {{.RepoTags}}' $(KVM_BUILD_IMAGE) || $(MAKE) kvm-image
|
||||
$(call DOCKER,$(KVM_BUILD_IMAGE),/usr/bin/make $@ COMMIT=$(COMMIT))
|
||||
docker image inspect -f '{{.Id}} {{.RepoTags}}' $(KVM_BUILD_IMAGE_AMD64) || $(MAKE) kvm-image-amd64
|
||||
$(call DOCKER,$(KVM_BUILD_IMAGE_AMD64),/usr/bin/make $@ COMMIT=$(COMMIT))
|
||||
# make extra sure that we are linking with the older version of libvirt (1.3.1)
|
||||
test "`strings $@ | grep '^LIBVIRT_[0-9]' | sort | tail -n 1`" = "LIBVIRT_1.2.9"
|
||||
else
|
||||
|
@ -814,51 +885,6 @@ else
|
|||
endif
|
||||
chmod +X $@
|
||||
|
||||
out/docker-machine-driver-kvm2_$(DEB_VERSION).deb: out/docker-machine-driver-kvm2_$(DEB_VERSION)-0_amd64.deb
|
||||
cp $< $@
|
||||
|
||||
out/docker-machine-driver-kvm2_$(DEB_VERSION)-0_%.deb: out/docker-machine-driver-kvm2-%
|
||||
cp -r installers/linux/deb/kvm2_deb_template out/docker-machine-driver-kvm2_$(DEB_VERSION)
|
||||
chmod 0755 out/docker-machine-driver-kvm2_$(DEB_VERSION)/DEBIAN
|
||||
sed -E -i 's/--VERSION--/'$(DEB_VERSION)'/g' out/docker-machine-driver-kvm2_$(DEB_VERSION)/DEBIAN/control
|
||||
sed -E -i 's/--ARCH--/'$*'/g' out/docker-machine-driver-kvm2_$(DEB_VERSION)/DEBIAN/control
|
||||
mkdir -p out/docker-machine-driver-kvm2_$(DEB_VERSION)/usr/bin
|
||||
cp $< out/docker-machine-driver-kvm2_$(DEB_VERSION)/usr/bin/docker-machine-driver-kvm2
|
||||
fakeroot dpkg-deb --build out/docker-machine-driver-kvm2_$(DEB_VERSION) $@
|
||||
rm -rf out/docker-machine-driver-kvm2_$(DEB_VERSION)
|
||||
|
||||
out/docker-machine-driver-kvm2-$(RPM_VERSION).rpm: out/docker-machine-driver-kvm2-$(RPM_VERSION)-0.x86_64.deb
|
||||
cp $< $@
|
||||
|
||||
out/docker-machine-driver-kvm2-$(RPM_VERSION)-0.%.rpm: out/docker-machine-driver-kvm2-%
|
||||
cp -r installers/linux/rpm/kvm2_rpm_template out/docker-machine-driver-kvm2-$(RPM_VERSION)
|
||||
sed -E -i 's/--VERSION--/'$(RPM_VERSION)'/g' out/docker-machine-driver-kvm2-$(RPM_VERSION)/docker-machine-driver-kvm2.spec
|
||||
sed -E -i 's|--OUT--|'$(PWD)/out'|g' out/docker-machine-driver-kvm2-$(RPM_VERSION)/docker-machine-driver-kvm2.spec
|
||||
rpmbuild -bb -D "_rpmdir $(PWD)/out" --target $* \
|
||||
out/docker-machine-driver-kvm2-$(RPM_VERSION)/docker-machine-driver-kvm2.spec
|
||||
@mv out/$*/docker-machine-driver-kvm2-$(RPM_VERSION)-0.$*.rpm out/ && rmdir out/$*
|
||||
rm -rf out/docker-machine-driver-kvm2-$(RPM_VERSION)
|
||||
|
||||
.PHONY: kvm-image
|
||||
kvm-image: installers/linux/kvm/Dockerfile ## Convenient alias to build the docker container
|
||||
docker build --build-arg "GO_VERSION=$(KVM_GO_VERSION)" -t $(KVM_BUILD_IMAGE) -f $< $(dir $<)
|
||||
@echo ""
|
||||
@echo "$(@) successfully built"
|
||||
|
||||
kvm_in_docker:
|
||||
docker image inspect -f '{{.Id}} {{.RepoTags}}' $(KVM_BUILD_IMAGE) || $(MAKE) kvm-image
|
||||
rm -f out/docker-machine-driver-kvm2
|
||||
$(call DOCKER,$(KVM_BUILD_IMAGE),/usr/bin/make out/docker-machine-driver-kvm2 COMMIT=$(COMMIT))
|
||||
|
||||
.PHONY: install-kvm-driver
|
||||
install-kvm-driver: out/docker-machine-driver-kvm2 ## Install KVM Driver
|
||||
mkdir -p $(GOBIN)
|
||||
cp out/docker-machine-driver-kvm2 $(GOBIN)/docker-machine-driver-kvm2
|
||||
|
||||
.PHONY: release-kvm-driver
|
||||
release-kvm-driver: install-kvm-driver checksum ## Release KVM Driver
|
||||
gsutil cp $(GOBIN)/docker-machine-driver-kvm2 gs://minikube/drivers/kvm/$(VERSION)/
|
||||
gsutil cp $(GOBIN)/docker-machine-driver-kvm2.sha256 gs://minikube/drivers/kvm/$(VERSION)/
|
||||
|
||||
site/themes/docsy/assets/vendor/bootstrap/package.js: ## update the website docsy theme git submodule
|
||||
git submodule update -f --init --recursive
|
||||
|
@ -866,8 +892,7 @@ site/themes/docsy/assets/vendor/bootstrap/package.js: ## update the website docs
|
|||
out/hugo/hugo:
|
||||
mkdir -p out
|
||||
test -d out/hugo || git clone https://github.com/gohugoio/hugo.git out/hugo
|
||||
go get golang.org/dl/go1.16 && go1.16 download
|
||||
(cd out/hugo && go1.16 build --tags extended)
|
||||
(cd out/hugo && go build --tags extended)
|
||||
|
||||
.PHONY: site
|
||||
site: site/themes/docsy/assets/vendor/bootstrap/package.js out/hugo/hugo ## Serve the documentation site to localhost
|
||||
|
@ -881,17 +906,14 @@ site: site/themes/docsy/assets/vendor/bootstrap/package.js out/hugo/hugo ## Serv
|
|||
out/mkcmp:
|
||||
GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o $@ cmd/performance/mkcmp/main.go
|
||||
|
||||
.PHONY: deploy/kicbase/auto-pause # auto pause binary to be used for kic image work around for not passing the whole repo as docker context
|
||||
deploy/kicbase/auto-pause: $(SOURCE_GENERATED) $(SOURCE_FILES)
|
||||
GOOS=linux GOARCH=$(GOARCH) go build -o $@ cmd/auto-pause/auto-pause.go
|
||||
|
||||
# auto pause binary to be used for ISO
|
||||
deploy/iso/minikube-iso/board/coreos/minikube/rootfs-overlay/usr/bin/auto-pause: $(SOURCE_GENERATED) $(SOURCE_FILES)
|
||||
deploy/iso/minikube-iso/board/coreos/minikube/rootfs-overlay/usr/bin/auto-pause: $(SOURCE_FILES) $(ASSET_FILES)
|
||||
GOOS=linux GOARCH=$(GOARCH) go build -o $@ cmd/auto-pause/auto-pause.go
|
||||
|
||||
|
||||
.PHONY: deploy/addons/auto-pause/auto-pause-hook
|
||||
deploy/addons/auto-pause/auto-pause-hook: $(SOURCE_GENERATED) ## Build auto-pause hook addon
|
||||
deploy/addons/auto-pause/auto-pause-hook: ## Build auto-pause hook addon
|
||||
$(if $(quiet),@echo " GO $@")
|
||||
$(Q)GOOS=linux CGO_ENABLED=0 go build -a --ldflags '-extldflags "-static"' -tags netgo -installsuffix netgo -o $@ cmd/auto-pause/auto-pause-hook/main.go cmd/auto-pause/auto-pause-hook/config.go cmd/auto-pause/auto-pause-hook/certs.go
|
||||
|
||||
|
|
2
OWNERS
2
OWNERS
|
@ -10,12 +10,14 @@ reviewers:
|
|||
- prasadkatti
|
||||
- ilya-zuyev
|
||||
- prezha
|
||||
- spowelljr
|
||||
approvers:
|
||||
- tstromberg
|
||||
- afbjorklund
|
||||
- sharifelgamal
|
||||
- medyagh
|
||||
- ilya-zuyev
|
||||
- spowelljr
|
||||
emeritus_approvers:
|
||||
- dlorenc
|
||||
- luxas
|
||||
|
|
|
@ -17,6 +17,7 @@ limitations under the License.
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
|
@ -39,10 +40,11 @@ var mu sync.Mutex
|
|||
var runtimePaused bool
|
||||
var version = "0.0.1"
|
||||
|
||||
// TODO: #10597 make this configurable to support containerd/cri-o
|
||||
var runtime = "docker"
|
||||
var runtime = flag.String("container-runtime", "docker", "Container runtime to use for (un)pausing")
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
// TODO: #10595 make this configurable
|
||||
const interval = time.Minute * 1
|
||||
|
||||
|
@ -89,7 +91,7 @@ func runPause() {
|
|||
|
||||
r := command.NewExecRunner(true)
|
||||
|
||||
cr, err := cruntime.New(cruntime.Config{Type: runtime, Runner: r})
|
||||
cr, err := cruntime.New(cruntime.Config{Type: *runtime, Runner: r})
|
||||
if err != nil {
|
||||
exit.Error(reason.InternalNewRuntime, "Failed runtime", err)
|
||||
}
|
||||
|
@ -111,7 +113,7 @@ func runUnpause() {
|
|||
|
||||
r := command.NewExecRunner(true)
|
||||
|
||||
cr, err := cruntime.New(cruntime.Config{Type: runtime, Runner: r})
|
||||
cr, err := cruntime.New(cruntime.Config{Type: *runtime, Runner: r})
|
||||
if err != nil {
|
||||
exit.Error(reason.InternalNewRuntime, "Failed runtime", err)
|
||||
}
|
||||
|
@ -130,7 +132,7 @@ func alreadyPaused() {
|
|||
defer mu.Unlock()
|
||||
|
||||
r := command.NewExecRunner(true)
|
||||
cr, err := cruntime.New(cruntime.Config{Type: runtime, Runner: r})
|
||||
cr, err := cruntime.New(cruntime.Config{Type: *runtime, Runner: r})
|
||||
if err != nil {
|
||||
exit.Error(reason.InternalNewRuntime, "Failed runtime", err)
|
||||
}
|
||||
|
|
|
@ -98,7 +98,7 @@ var printAddonsList = func(cc *config.ClusterConfig) {
|
|||
|
||||
var tData [][]string
|
||||
table := tablewriter.NewWriter(os.Stdout)
|
||||
table.SetHeader([]string{"Addon Name", "Profile", "Status"})
|
||||
table.SetHeader([]string{"Addon Name", "Profile", "Status", "Maintainer"})
|
||||
table.SetAutoFormatHeaders(true)
|
||||
table.SetBorders(tablewriter.Border{Left: true, Top: true, Right: true, Bottom: true})
|
||||
table.SetCenterSeparator("|")
|
||||
|
@ -106,7 +106,11 @@ var printAddonsList = func(cc *config.ClusterConfig) {
|
|||
for _, addonName := range addonNames {
|
||||
addonBundle := assets.Addons[addonName]
|
||||
enabled := addonBundle.IsEnabled(cc)
|
||||
tData = append(tData, []string{addonName, cc.Name, fmt.Sprintf("%s %s", stringFromStatus(enabled), iconFromStatus(enabled))})
|
||||
maintainer := addonBundle.Maintainer
|
||||
if maintainer == "" {
|
||||
maintainer = "unknown (third-party)"
|
||||
}
|
||||
tData = append(tData, []string{addonName, cc.Name, fmt.Sprintf("%s %s", stringFromStatus(enabled), iconFromStatus(enabled)), maintainer})
|
||||
}
|
||||
|
||||
table.AppendBulk(tData)
|
||||
|
|
|
@ -76,7 +76,7 @@ var settings = []Setting{
|
|||
{
|
||||
name: "cpus",
|
||||
set: SetInt,
|
||||
validations: []setFn{IsPositive},
|
||||
validations: []setFn{IsValidCPUs},
|
||||
callbacks: []setFn{RequiresRestartMsg},
|
||||
},
|
||||
{
|
||||
|
@ -122,18 +122,6 @@ var settings = []Setting{
|
|||
name: config.ReminderWaitPeriodInHours,
|
||||
set: SetInt,
|
||||
},
|
||||
{
|
||||
name: config.WantReportError,
|
||||
set: SetBool,
|
||||
},
|
||||
{
|
||||
name: config.WantReportErrorPrompt,
|
||||
set: SetBool,
|
||||
},
|
||||
{
|
||||
name: config.WantKubectlDownloadMsg,
|
||||
set: SetBool,
|
||||
},
|
||||
{
|
||||
name: config.WantNoneDriverWarning,
|
||||
set: SetBool,
|
||||
|
@ -146,14 +134,6 @@ var settings = []Setting{
|
|||
name: Bootstrapper,
|
||||
set: SetString,
|
||||
},
|
||||
{
|
||||
name: config.ShowDriverDeprecationNotification,
|
||||
set: SetBool,
|
||||
},
|
||||
{
|
||||
name: config.ShowBootstrapperDeprecationNotification,
|
||||
set: SetBool,
|
||||
},
|
||||
{
|
||||
name: "insecure-registry",
|
||||
set: SetString,
|
||||
|
@ -172,7 +152,7 @@ var settings = []Setting{
|
|||
setMap: SetMap,
|
||||
},
|
||||
{
|
||||
name: "embed-certs",
|
||||
name: config.EmbedCerts,
|
||||
set: SetBool,
|
||||
},
|
||||
{
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
"strings"
|
||||
|
||||
units "github.com/docker/go-units"
|
||||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
"k8s.io/minikube/pkg/minikube/cruntime"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
|
@ -53,8 +54,19 @@ func IsValidDiskSize(name string, disksize string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// IsValidCPUs checks if a string is a valid number of CPUs
|
||||
func IsValidCPUs(name string, cpus string) error {
|
||||
if cpus == constants.MaxResources {
|
||||
return nil
|
||||
}
|
||||
return IsPositive(name, cpus)
|
||||
}
|
||||
|
||||
// IsValidMemory checks if a string is a valid memory size
|
||||
func IsValidMemory(name string, memsize string) error {
|
||||
if memsize == constants.MaxResources {
|
||||
return nil
|
||||
}
|
||||
_, err := units.FromHumanSize(memsize)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid memory size: %v", err)
|
||||
|
|
|
@ -87,6 +87,24 @@ func (error DeletionError) Error() string {
|
|||
return error.Err.Error()
|
||||
}
|
||||
|
||||
var hostAndDirsDeleter = func(api libmachine.API, cc *config.ClusterConfig, profileName string) error {
|
||||
if err := killMountProcess(); err != nil {
|
||||
out.FailureT("Failed to kill mount process: {{.error}}", out.V{"error": err})
|
||||
}
|
||||
|
||||
deleteHosts(api, cc)
|
||||
|
||||
// In case DeleteHost didn't complete the job.
|
||||
deleteProfileDirectory(profileName)
|
||||
deleteMachineDirectories(cc)
|
||||
|
||||
if err := deleteConfig(profileName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return deleteContext(profileName)
|
||||
}
|
||||
|
||||
func init() {
|
||||
deleteCmd.Flags().BoolVar(&deleteAll, "all", false, "Set flag to delete all profiles")
|
||||
deleteCmd.Flags().BoolVar(&purge, "purge", false, "Set this flag to delete the '.minikube' folder from your user directory.")
|
||||
|
@ -282,23 +300,10 @@ func deleteProfile(ctx context.Context, profile *config.Profile) error {
|
|||
}
|
||||
}
|
||||
|
||||
if err := killMountProcess(); err != nil {
|
||||
out.FailureT("Failed to kill mount process: {{.error}}", out.V{"error": err})
|
||||
}
|
||||
|
||||
deleteHosts(api, cc)
|
||||
|
||||
// In case DeleteHost didn't complete the job.
|
||||
deleteProfileDirectory(profile.Name)
|
||||
deleteMachineDirectories(cc)
|
||||
|
||||
if err := deleteConfig(profile.Name); err != nil {
|
||||
if err := hostAndDirsDeleter(api, cc, profile.Name); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := deleteContext(profile.Name); err != nil {
|
||||
return err
|
||||
}
|
||||
out.Step(style.Deleted, `Removed all traces of the "{{.name}}" cluster.`, out.V{"name": profile.Name})
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -17,15 +17,18 @@ limitations under the License.
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/machine/libmachine"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/otiai10/copy"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/localpath"
|
||||
)
|
||||
|
@ -114,6 +117,7 @@ func TestDeleteProfile(t *testing.T) {
|
|||
t.Logf("load failure: %v", err)
|
||||
}
|
||||
|
||||
hostAndDirsDeleter = hostAndDirsDeleterMock
|
||||
errs := DeleteProfiles([]*config.Profile{profile})
|
||||
if len(errs) > 0 {
|
||||
HandleDeletionErrors(errs)
|
||||
|
@ -154,6 +158,17 @@ func TestDeleteProfile(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
var hostAndDirsDeleterMock = func(api libmachine.API, cc *config.ClusterConfig, profileName string) error {
|
||||
return deleteContextTest()
|
||||
}
|
||||
|
||||
func deleteContextTest() error {
|
||||
if err := cmdcfg.Unset(config.ProfileName); err != nil {
|
||||
return DeletionError{Err: fmt.Errorf("unset minikube profile: %v", err), Errtype: Fatal}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestDeleteAllProfiles(t *testing.T) {
|
||||
td, err := ioutil.TempDir("", "all")
|
||||
if err != nil {
|
||||
|
@ -207,6 +222,7 @@ func TestDeleteAllProfiles(t *testing.T) {
|
|||
}
|
||||
|
||||
profiles := append(validProfiles, inValidProfiles...)
|
||||
hostAndDirsDeleter = hostAndDirsDeleterMock
|
||||
errs := DeleteProfiles(profiles)
|
||||
|
||||
if errs != nil {
|
||||
|
|
|
@ -17,43 +17,15 @@ limitations under the License.
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/spf13/pflag"
|
||||
"k8s.io/minikube/pkg/generate"
|
||||
)
|
||||
|
||||
func TestGenerateDocs(t *testing.T) {
|
||||
pflag.BoolP("help", "h", false, "") // avoid 'Docs are not updated. Please run `make generate-docs` to update commands documentation' error
|
||||
dir := "../../../site/content/en/docs/commands/"
|
||||
|
||||
for _, sc := range RootCmd.Commands() {
|
||||
t.Run(sc.Name(), func(t *testing.T) {
|
||||
if sc.Hidden {
|
||||
t.Skip()
|
||||
}
|
||||
fp := filepath.Join(dir, fmt.Sprintf("%s.md", sc.Name()))
|
||||
expectedContents, err := ioutil.ReadFile(fp)
|
||||
if err != nil {
|
||||
t.Fatalf("Docs are not updated. Please run `make generate-docs` to update commands documentation: %v", err)
|
||||
}
|
||||
actualContents, err := generate.DocForCommand(sc)
|
||||
if err != nil {
|
||||
t.Fatalf("error getting contents: %v", err)
|
||||
}
|
||||
if diff := cmp.Diff(actualContents, string(expectedContents)); diff != "" {
|
||||
t.Fatalf("Docs are not updated. Please run `make generate-docs` to update commands documentation: %s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateTestDocs(t *testing.T) {
|
||||
tempdir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
|
@ -61,12 +33,6 @@ func TestGenerateTestDocs(t *testing.T) {
|
|||
}
|
||||
defer os.RemoveAll(tempdir)
|
||||
docPath := filepath.Join(tempdir, "tests.md")
|
||||
realPath := "../../../site/content/en/docs/contrib/tests.en.md"
|
||||
|
||||
expectedContents, err := ioutil.ReadFile(realPath)
|
||||
if err != nil {
|
||||
t.Fatalf("error reading existing file: %v", err)
|
||||
}
|
||||
|
||||
err = generate.TestDocs(docPath, "../../../test/integration")
|
||||
if err != nil {
|
||||
|
@ -77,10 +43,6 @@ func TestGenerateTestDocs(t *testing.T) {
|
|||
t.Fatalf("error reading generated file: %v", err)
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(string(actualContents), string(expectedContents)); diff != "" {
|
||||
t.Errorf("Test docs are not updated. Please run `make generate-docs` to update documentation: %s", diff)
|
||||
}
|
||||
|
||||
rest := string(actualContents)
|
||||
for rest != "" {
|
||||
rest = checkForNeedsDoc(t, rest)
|
||||
|
|
|
@ -104,7 +104,7 @@ func runPause(cmd *cobra.Command, args []string) {
|
|||
}
|
||||
|
||||
func init() {
|
||||
pauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", constants.DefaultNamespaces, "namespaces to pause")
|
||||
pauseCmd.Flags().StringSliceVarP(&namespaces, "namespaces", "n", constants.DefaultNamespaces, "namespaces to pause")
|
||||
pauseCmd.Flags().BoolVarP(&allNamespaces, "all-namespaces", "A", false, "If set, pause all namespaces")
|
||||
pauseCmd.Flags().StringVarP(&outputFormat, "output", "o", "text", "Format to print stdout in. Options include: [text,json]")
|
||||
}
|
||||
|
|
|
@ -25,9 +25,6 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/minikube/pkg/minikube/notify"
|
||||
"k8s.io/minikube/pkg/version"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/spf13/viper"
|
||||
|
@ -41,9 +38,11 @@ import (
|
|||
"k8s.io/minikube/pkg/minikube/detect"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/localpath"
|
||||
"k8s.io/minikube/pkg/minikube/notify"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
"k8s.io/minikube/pkg/minikube/reason"
|
||||
"k8s.io/minikube/pkg/minikube/translate"
|
||||
"k8s.io/minikube/pkg/version"
|
||||
)
|
||||
|
||||
var dirs = [...]string{
|
||||
|
@ -91,16 +90,17 @@ func Execute() {
|
|||
}
|
||||
}
|
||||
if !found {
|
||||
exit.Message(reason.WrongBinaryWSL, "You are trying to run windows .exe binary inside WSL, for better integration please use Linux binary instead (Download at https://minikube.sigs.k8s.io/docs/start/.). Otherwise if you still want to do this, you can do it using --force")
|
||||
exit.Message(reason.WrongBinaryWSL, "You are trying to run a windows .exe binary inside WSL. For better integration please use a Linux binary instead (Download at https://minikube.sigs.k8s.io/docs/start/.). Otherwise if you still want to do this, you can do it using --force")
|
||||
}
|
||||
}
|
||||
|
||||
if runtime.GOOS == "darwin" && detect.IsAmd64M1Emulation() {
|
||||
exit.Message(reason.WrongBinaryM1, "You are trying to run amd64 binary on M1 system. Please use darwin/arm64 binary instead (Download at {{.url}}.)",
|
||||
out.V{"url": notify.DownloadURL(version.GetVersion(), "darwin", "amd64")})
|
||||
out.Infof("You are trying to run amd64 binary on M1 system. Please consider running darwin/arm64 binary instead (Download at {{.url}}.)",
|
||||
out.V{"url": notify.DownloadURL(version.GetVersion(), "darwin", "arm64")})
|
||||
}
|
||||
|
||||
_, callingCmd := filepath.Split(os.Args[0])
|
||||
callingCmd = strings.TrimSuffix(callingCmd, ".exe")
|
||||
|
||||
if callingCmd == "kubectl" {
|
||||
// If the user is using the minikube binary as kubectl, allow them to specify the kubectl context without also specifying minikube profile
|
||||
|
@ -301,14 +301,10 @@ func setupViper() {
|
|||
viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
|
||||
viper.AutomaticEnv()
|
||||
|
||||
viper.RegisterAlias(config.EmbedCerts, embedCerts)
|
||||
viper.SetDefault(config.WantUpdateNotification, true)
|
||||
viper.SetDefault(config.ReminderWaitPeriodInHours, 24)
|
||||
viper.SetDefault(config.WantReportError, false)
|
||||
viper.SetDefault(config.WantReportErrorPrompt, true)
|
||||
viper.SetDefault(config.WantKubectlDownloadMsg, true)
|
||||
viper.SetDefault(config.WantNoneDriverWarning, true)
|
||||
viper.SetDefault(config.ShowDriverDeprecationNotification, true)
|
||||
viper.SetDefault(config.ShowBootstrapperDeprecationNotification, true)
|
||||
}
|
||||
|
||||
func addToPath(dir string) {
|
||||
|
|
|
@ -32,7 +32,7 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/blang/semver/v4"
|
||||
"github.com/docker/machine/libmachine/ssh"
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
|
@ -162,14 +162,13 @@ func runStart(cmd *cobra.Command, args []string) {
|
|||
// can be configured as MINIKUBE_IMAGE_REPOSITORY and IMAGE_MIRROR_COUNTRY
|
||||
// this should be updated to documentation
|
||||
if len(registryMirror) == 0 {
|
||||
registryMirror = viper.GetStringSlice("registry_mirror")
|
||||
registryMirror = viper.GetStringSlice("registry-mirror")
|
||||
}
|
||||
|
||||
if !config.ProfileNameValid(ClusterFlagValue()) {
|
||||
out.WarningT("Profile name '{{.name}}' is not valid", out.V{"name": ClusterFlagValue()})
|
||||
exit.Message(reason.Usage, "Only alphanumeric and dashes '-' are permitted. Minimum 2 characters, starting with alphanumeric.")
|
||||
}
|
||||
|
||||
existing, err := config.Load(ClusterFlagValue())
|
||||
if err != nil && !config.IsNotExist(err) {
|
||||
kind := reason.HostConfigLoad
|
||||
|
@ -1031,56 +1030,49 @@ func validateRequestedMemorySize(req int, drvName string) {
|
|||
|
||||
// validateCPUCount validates the cpu count matches the minimum recommended & not exceeding the available cpu count
|
||||
func validateCPUCount(drvName string) {
|
||||
var cpuCount int
|
||||
if driver.BareMetal(drvName) {
|
||||
var availableCPUs int
|
||||
|
||||
// Uses the gopsutil cpu package to count the number of logical cpu cores
|
||||
cpuCount := getCPUCount(drvName)
|
||||
isKIC := driver.IsKIC(drvName)
|
||||
|
||||
if isKIC {
|
||||
si, err := oci.CachedDaemonInfo(drvName)
|
||||
if err != nil {
|
||||
si, err = oci.DaemonInfo(drvName)
|
||||
if err != nil {
|
||||
exit.Message(reason.Usage, "Ensure your {{.driver_name}} is running and is healthy.", out.V{"driver_name": driver.FullName(drvName)})
|
||||
}
|
||||
}
|
||||
availableCPUs = si.CPUs
|
||||
} else {
|
||||
ci, err := cpu.Counts(true)
|
||||
if err != nil {
|
||||
klog.Warningf("Unable to get CPU info: %v", err)
|
||||
} else {
|
||||
cpuCount = ci
|
||||
exit.Message(reason.Usage, "Unable to get CPU info: {{.err}}", out.V{"err": err})
|
||||
}
|
||||
} else {
|
||||
cpuCount = viper.GetInt(cpus)
|
||||
availableCPUs = ci
|
||||
}
|
||||
|
||||
if cpuCount < minimumCPUS {
|
||||
exitIfNotForced(reason.RsrcInsufficientCores, "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}", out.V{"requested_cpus": cpuCount, "minimum_cpus": minimumCPUS})
|
||||
}
|
||||
|
||||
if !driver.IsKIC((drvName)) {
|
||||
return
|
||||
}
|
||||
|
||||
si, err := oci.CachedDaemonInfo(drvName)
|
||||
if err != nil {
|
||||
out.Styled(style.Confused, "Failed to verify '{{.driver_name}} info' will try again ...", out.V{"driver_name": drvName})
|
||||
si, err = oci.DaemonInfo(drvName)
|
||||
if err != nil {
|
||||
exit.Message(reason.Usage, "Ensure your {{.driver_name}} is running and is healthy.", out.V{"driver_name": driver.FullName(drvName)})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if si.CPUs < cpuCount {
|
||||
|
||||
if availableCPUs < cpuCount {
|
||||
if driver.IsDockerDesktop(drvName) {
|
||||
out.Styled(style.Empty, `- Ensure your {{.driver_name}} daemon has access to enough CPU/memory resources.`, out.V{"driver_name": drvName})
|
||||
if runtime.GOOS == "darwin" {
|
||||
out.Styled(style.Empty, `- Docs https://docs.docker.com/docker-for-mac/#resources`, out.V{"driver_name": drvName})
|
||||
out.Styled(style.Empty, `- Docs https://docs.docker.com/docker-for-mac/#resources`)
|
||||
}
|
||||
if runtime.GOOS == "windows" {
|
||||
out.String("\n\t")
|
||||
out.Styled(style.Empty, `- Docs https://docs.docker.com/docker-for-windows/#resources`, out.V{"driver_name": drvName})
|
||||
out.Styled(style.Empty, `- Docs https://docs.docker.com/docker-for-windows/#resources`)
|
||||
}
|
||||
}
|
||||
|
||||
exitIfNotForced(reason.RsrcInsufficientCores, "Requested cpu count {{.requested_cpus}} is greater than the available cpus of {{.avail_cpus}}", out.V{"requested_cpus": cpuCount, "avail_cpus": si.CPUs})
|
||||
exitIfNotForced(reason.RsrcInsufficientCores, "Requested cpu count {{.requested_cpus}} is greater than the available cpus of {{.avail_cpus}}", out.V{"requested_cpus": cpuCount, "avail_cpus": availableCPUs})
|
||||
}
|
||||
|
||||
// looks good
|
||||
if si.CPUs >= 2 {
|
||||
if availableCPUs >= 2 {
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -1237,13 +1229,32 @@ func validateChangedMemoryFlags(drvName string) {
|
|||
if !driver.HasResourceLimits(drvName) {
|
||||
out.WarningT("The '{{.name}}' driver does not respect the --memory flag", out.V{"name": drvName})
|
||||
}
|
||||
req, err := util.CalculateSizeInMB(viper.GetString(memory))
|
||||
if err != nil {
|
||||
exitIfNotForced(reason.Usage, "Unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
|
||||
var req int
|
||||
var err error
|
||||
memString := viper.GetString(memory)
|
||||
if memString == constants.MaxResources {
|
||||
sysLimit, containerLimit, err := memoryLimits(drvName)
|
||||
if err != nil {
|
||||
klog.Warningf("Unable to query memory limits: %+v", err)
|
||||
}
|
||||
req = noLimitMemory(sysLimit, containerLimit)
|
||||
} else {
|
||||
req, err = util.CalculateSizeInMB(memString)
|
||||
if err != nil {
|
||||
exitIfNotForced(reason.Usage, "Unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": memString, "error": err})
|
||||
}
|
||||
}
|
||||
validateRequestedMemorySize(req, drvName)
|
||||
}
|
||||
|
||||
func noLimitMemory(sysLimit int, containerLimit int) int {
|
||||
if containerLimit != 0 {
|
||||
return containerLimit
|
||||
}
|
||||
// Recommend 1GB to handle OS/VM overhead
|
||||
return sysLimit - 1024
|
||||
}
|
||||
|
||||
// This function validates if the --registry-mirror
|
||||
// args match the format of http://localhost
|
||||
func validateRegistryMirror() {
|
||||
|
@ -1529,5 +1540,5 @@ func exitGuestProvision(err error) {
|
|||
if errors.Cause(err) == oci.ErrGetSSHPortContainerNotRunning {
|
||||
exit.Message(reason.GuestProvisionContainerExited, "Docker container exited prematurely after it was created, consider investigating Docker's performance/health.")
|
||||
}
|
||||
exit.Error(reason.GuestProvision, "error provisioning host", err)
|
||||
exit.Error(reason.GuestProvision, "error provisioning guest", err)
|
||||
}
|
||||
|
|
|
@ -21,12 +21,14 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/blang/semver/v4"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/shirou/gopsutil/v3/cpu"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/minikube/pkg/drivers/kic"
|
||||
"k8s.io/minikube/pkg/drivers/kic/oci"
|
||||
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
|
||||
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
|
||||
"k8s.io/minikube/pkg/minikube/cni"
|
||||
|
@ -135,8 +137,8 @@ func initMinikubeFlags() {
|
|||
startCmd.Flags().Bool(interactive, true, "Allow user prompts for more information")
|
||||
startCmd.Flags().Bool(dryRun, false, "dry-run mode. Validates configuration, but does not mutate system state")
|
||||
|
||||
startCmd.Flags().Int(cpus, 2, "Number of CPUs allocated to Kubernetes.")
|
||||
startCmd.Flags().String(memory, "", "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>], where unit = b, k, m or g).")
|
||||
startCmd.Flags().String(cpus, "2", fmt.Sprintf("Number of CPUs allocated to Kubernetes. Use %q to use the maximum number of CPUs.", constants.MaxResources))
|
||||
startCmd.Flags().String(memory, "", fmt.Sprintf("Amount of RAM to allocate to Kubernetes (format: <number>[<unit>], where unit = b, k, m or g). Use %q to use the maximum amount of memory.", constants.MaxResources))
|
||||
startCmd.Flags().String(humanReadableDiskSize, defaultDiskSize, "Disk size allocated to the minikube VM (format: <number>[<unit>], where unit = b, k, m or g).")
|
||||
startCmd.Flags().Bool(downloadOnly, false, "If true, only download and cache files for later use - don't install or start anything.")
|
||||
startCmd.Flags().Bool(cacheImages, true, "If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none.")
|
||||
|
@ -290,6 +292,30 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
|
|||
return createNode(cc, kubeNodeName, existing)
|
||||
}
|
||||
|
||||
func getCPUCount(drvName string) int {
|
||||
if viper.GetString(cpus) != constants.MaxResources {
|
||||
return viper.GetInt(cpus)
|
||||
}
|
||||
|
||||
if !driver.IsKIC(drvName) {
|
||||
ci, err := cpu.Counts(true)
|
||||
if err != nil {
|
||||
exit.Message(reason.Usage, "Unable to get CPU info: {{.err}}", out.V{"err": err})
|
||||
}
|
||||
return ci
|
||||
}
|
||||
|
||||
si, err := oci.CachedDaemonInfo(drvName)
|
||||
if err != nil {
|
||||
si, err = oci.DaemonInfo(drvName)
|
||||
if err != nil {
|
||||
exit.Message(reason.Usage, "Ensure your {{.driver_name}} is running and is healthy.", out.V{"driver_name": driver.FullName(drvName)})
|
||||
}
|
||||
}
|
||||
|
||||
return si.CPUs
|
||||
}
|
||||
|
||||
func getMemorySize(cmd *cobra.Command, drvName string) int {
|
||||
sysLimit, containerLimit, err := memoryLimits(drvName)
|
||||
if err != nil {
|
||||
|
@ -298,10 +324,15 @@ func getMemorySize(cmd *cobra.Command, drvName string) int {
|
|||
|
||||
mem := suggestMemoryAllocation(sysLimit, containerLimit, viper.GetInt(nodes))
|
||||
if cmd.Flags().Changed(memory) || viper.IsSet(memory) {
|
||||
memString := viper.GetString(memory)
|
||||
var err error
|
||||
mem, err = pkgutil.CalculateSizeInMB(viper.GetString(memory))
|
||||
if err != nil {
|
||||
exit.Message(reason.Usage, "Generate unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
|
||||
if memString == constants.MaxResources {
|
||||
mem = noLimitMemory(sysLimit, containerLimit)
|
||||
} else {
|
||||
mem, err = pkgutil.CalculateSizeInMB(memString)
|
||||
if err != nil {
|
||||
exit.Message(reason.Usage, "Generate unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": memString, "error": err})
|
||||
}
|
||||
}
|
||||
if driver.IsKIC(drvName) && mem > containerLimit {
|
||||
exit.Message(reason.Usage, "{{.driver_name}} has only {{.container_limit}}MB memory but you specified {{.specified_memory}}MB", out.V{"container_limit": containerLimit, "specified_memory": mem, "driver_name": driver.FullName(drvName)})
|
||||
|
@ -384,7 +415,7 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName s
|
|||
KicBaseImage: viper.GetString(kicBaseImage),
|
||||
Network: viper.GetString(network),
|
||||
Memory: getMemorySize(cmd, drvName),
|
||||
CPUs: viper.GetInt(cpus),
|
||||
CPUs: getCPUCount(drvName),
|
||||
DiskSize: getDiskSize(),
|
||||
Driver: drvName,
|
||||
ListenAddress: viper.GetString(listenAddress),
|
||||
|
|
|
@ -21,7 +21,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/blang/semver/v4"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
|
|
|
@ -64,7 +64,7 @@ func init() {
|
|||
stopCmd.Flags().StringVarP(&outputFormat, "output", "o", "text", "Format to print stdout in. Options include: [text,json]")
|
||||
|
||||
if err := viper.GetViper().BindPFlags(stopCmd.Flags()); err != nil {
|
||||
exit.Error(reason.InternalFlagsBind, "unable to bind flags", err)
|
||||
exit.Error(reason.InternalBindFlags, "unable to bind flags", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -106,7 +106,7 @@ var unpauseCmd = &cobra.Command{
|
|||
}
|
||||
|
||||
func init() {
|
||||
unpauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", constants.DefaultNamespaces, "namespaces to unpause")
|
||||
unpauseCmd.Flags().StringSliceVarP(&namespaces, "namespaces", "n", constants.DefaultNamespaces, "namespaces to unpause")
|
||||
unpauseCmd.Flags().BoolVarP(&allNamespaces, "all-namespaces", "A", false, "If set, unpause all namespaces")
|
||||
unpauseCmd.Flags().StringVarP(&outputFormat, "output", "o", "text", "Format to print stdout in. Options include: [text,json]")
|
||||
}
|
||||
|
|
|
@ -18,18 +18,23 @@ package cmd
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"gopkg.in/yaml.v2"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
"k8s.io/minikube/pkg/minikube/reason"
|
||||
"k8s.io/minikube/pkg/version"
|
||||
)
|
||||
|
||||
var (
|
||||
versionOutput string
|
||||
shortVersion bool
|
||||
versionOutput string
|
||||
shortVersion bool
|
||||
listComponentsVersions bool
|
||||
)
|
||||
|
||||
var versionCmd = &cobra.Command{
|
||||
|
@ -43,6 +48,33 @@ var versionCmd = &cobra.Command{
|
|||
"minikubeVersion": minikubeVersion,
|
||||
"commit": gitCommitID,
|
||||
}
|
||||
|
||||
if listComponentsVersions && !shortVersion {
|
||||
co := mustload.Running(ClusterFlagValue())
|
||||
runner := co.CP.Runner
|
||||
versionCMDS := map[string]*exec.Cmd{
|
||||
"docker": exec.Command("docker", "version", "--format={{.Client.Version}}"),
|
||||
"containerd": exec.Command("containerd", "--version"),
|
||||
"crio": exec.Command("crio", "version"),
|
||||
"podman": exec.Command("sudo", "podman", "version"),
|
||||
"crictl": exec.Command("sudo", "crictl", "version"),
|
||||
"buildctl": exec.Command("buildctl", "--version"),
|
||||
"ctr": exec.Command("sudo", "ctr", "version"),
|
||||
"runc": exec.Command("runc", "--version"),
|
||||
}
|
||||
for k, v := range versionCMDS {
|
||||
rr, err := runner.RunCmd(v)
|
||||
if err != nil {
|
||||
klog.Warningf("error getting %s's version: %v", k, err)
|
||||
data[k] = "error"
|
||||
} else {
|
||||
data[k] = strings.TrimSpace(rr.Stdout.String())
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
switch versionOutput {
|
||||
case "":
|
||||
if !shortVersion {
|
||||
|
@ -50,6 +82,15 @@ var versionCmd = &cobra.Command{
|
|||
if gitCommitID != "" {
|
||||
out.Ln("commit: %v", gitCommitID)
|
||||
}
|
||||
for k, v := range data {
|
||||
// for backward compatibility we keep displaying the old way for these two
|
||||
if k == "minikubeVersion" || k == "commit" {
|
||||
continue
|
||||
}
|
||||
if v != "" {
|
||||
out.Ln("\n%s:\n%s", k, v)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.Ln("%v", minikubeVersion)
|
||||
}
|
||||
|
@ -74,4 +115,5 @@ var versionCmd = &cobra.Command{
|
|||
func init() {
|
||||
versionCmd.Flags().StringVarP(&versionOutput, "output", "o", "", "One of 'yaml' or 'json'.")
|
||||
versionCmd.Flags().BoolVar(&shortVersion, "short", false, "Print just the version number.")
|
||||
versionCmd.Flags().BoolVar(&listComponentsVersions, "components", false, "list versions of all components included with minikube. (the cluster must be running)")
|
||||
}
|
||||
|
|
|
@ -28,6 +28,7 @@ import (
|
|||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"k8s.io/klog/v2"
|
||||
|
@ -67,6 +68,7 @@ func main() {
|
|||
|
||||
// Don't parse flags when running as kubectl
|
||||
_, callingCmd := filepath.Split(os.Args[0])
|
||||
callingCmd = strings.TrimSuffix(callingCmd, ".exe")
|
||||
parse := callingCmd != "kubectl"
|
||||
setFlags(parse)
|
||||
|
||||
|
|
|
@ -0,0 +1,134 @@
|
|||
/*
|
||||
Copyright 2021 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package addons
|
||||
|
||||
import "embed"
|
||||
|
||||
var (
|
||||
// AutoPauseAssets assets for auto-pause addon
|
||||
//go:embed auto-pause/*.tmpl
|
||||
//go:embed auto-pause/unpause.lua
|
||||
AutoPauseAssets embed.FS
|
||||
|
||||
// DashboardAssets assets for dashboard addon
|
||||
//go:embed dashboard/*.yaml dashboard/*.tmpl
|
||||
DashboardAssets embed.FS
|
||||
|
||||
// DefaultStorageClassAssets assets for default-storageclass addon
|
||||
//go:embed storageclass/storageclass.yaml.tmpl
|
||||
DefaultStorageClassAssets embed.FS
|
||||
|
||||
// PodSecurityPolicyAssets assets for pod-security-policy addon
|
||||
//go:embed pod-security-policy/pod-security-policy.yaml.tmpl
|
||||
PodSecurityPolicyAssets embed.FS
|
||||
|
||||
// StorageProvisionerAssets assets for storage-provisioner addon
|
||||
//go:embed storage-provisioner/storage-provisioner.yaml.tmpl
|
||||
StorageProvisionerAssets embed.FS
|
||||
|
||||
// StorageProvisionerGlusterAssets assets for storage-provisioner-gluster addon
|
||||
//go:embed storage-provisioner-gluster/*.tmpl
|
||||
StorageProvisionerGlusterAssets embed.FS
|
||||
|
||||
// EfkAssets assets for efk addon
|
||||
//go:embed efk/*.tmpl
|
||||
EfkAssets embed.FS
|
||||
|
||||
// IngressAssets assets for ingress addon
|
||||
//go:embed ingress/*.tmpl
|
||||
IngressAssets embed.FS
|
||||
|
||||
// IstioProvisionerAssets assets for istio-provisioner addon
|
||||
//go:embed istio-provisioner/istio-operator.yaml.tmpl
|
||||
IstioProvisionerAssets embed.FS
|
||||
|
||||
// IstioAssets assets for istio addon
|
||||
//go:embed istio/istio-default-profile.yaml.tmpl
|
||||
IstioAssets embed.FS
|
||||
|
||||
// KubevirtAssets assets for kubevirt addon
|
||||
//go:embed kubevirt/pod.yaml.tmpl
|
||||
KubevirtAssets embed.FS
|
||||
|
||||
// MetricsServerAssets assets for metrics-server addon
|
||||
//go:embed metrics-server/*.tmpl
|
||||
MetricsServerAssets embed.FS
|
||||
|
||||
// OlmAssets assets for olm addon
|
||||
//go:embed olm/*.tmpl
|
||||
OlmAssets embed.FS
|
||||
|
||||
// RegistryAssets assets for registry addon
|
||||
//go:embed registry/*.tmpl
|
||||
RegistryAssets embed.FS
|
||||
|
||||
// RegistryCredsAssets assets for registry-creds addon
|
||||
//go:embed registry-creds/registry-creds-rc.yaml.tmpl
|
||||
RegistryCredsAssets embed.FS
|
||||
|
||||
// RegistryAliasesAssets assets for registry-aliases addon
|
||||
//go:embed registry-aliases/*.tmpl
|
||||
RegistryAliasesAssets embed.FS
|
||||
|
||||
// FreshpodAssets assets for freshpod addon
|
||||
//go:embed freshpod/freshpod-rc.yaml.tmpl
|
||||
FreshpodAssets embed.FS
|
||||
|
||||
// NvidiaDriverInstallerAssets assets for nvidia-driver-installer addon
|
||||
//go:embed gpu/nvidia-driver-installer.yaml.tmpl
|
||||
NvidiaDriverInstallerAssets embed.FS
|
||||
|
||||
// NvidiaGpuDevicePluginAssets assets for nvidia-gpu-device-plugin addon
|
||||
//go:embed gpu/nvidia-gpu-device-plugin.yaml.tmpl
|
||||
NvidiaGpuDevicePluginAssets embed.FS
|
||||
|
||||
// LogviewerAssets assets for logviewer addon
|
||||
//go:embed logviewer/*.tmpl
|
||||
LogviewerAssets embed.FS
|
||||
|
||||
// GvisorAssets assets for gvisor addon
|
||||
//go:embed gvisor/*.tmpl gvisor/*.toml
|
||||
GvisorAssets embed.FS
|
||||
|
||||
// HelmTillerAssets assets for helm-tiller addon
|
||||
//go:embed helm-tiller/*.tmpl
|
||||
HelmTillerAssets embed.FS
|
||||
|
||||
// IngressDNSAssets assets for ingress-dns addon
|
||||
//go:embed ingress-dns/ingress-dns-pod.yaml.tmpl
|
||||
IngressDNSAssets embed.FS
|
||||
|
||||
// MetallbAssets assets for metallb addon
|
||||
//go:embed metallb/*.tmpl
|
||||
MetallbAssets embed.FS
|
||||
|
||||
// AmbassadorAssets assets for ambassador addon
|
||||
//go:embed ambassador/*.tmpl
|
||||
AmbassadorAssets embed.FS
|
||||
|
||||
// GcpAuthAssets assets for gcp-auth addon
|
||||
//go:embed gcp-auth/*.tmpl
|
||||
GcpAuthAssets embed.FS
|
||||
|
||||
// VolumeSnapshotsAssets assets for volumesnapshots addon
|
||||
//go:embed volumesnapshots/*.tmpl
|
||||
VolumeSnapshotsAssets embed.FS
|
||||
|
||||
// CsiHostpathDriverAssets assets for csi-hostpath-driver addon
|
||||
//go:embed csi-hostpath-driver/deploy/*.tmpl csi-hostpath-driver/rbac/*.tmpl
|
||||
CsiHostpathDriverAssets embed.FS
|
||||
)
|
|
@ -3,7 +3,7 @@ Description=Auto Pause Service
|
|||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart=/bin/auto-pause
|
||||
ExecStart=/bin/auto-pause --container-runtime={{.ContainerRuntime}}
|
||||
Restart=always
|
||||
|
||||
[Install]
|
|
@ -1,4 +1,4 @@
|
|||
apiVersion: apiregistration.k8s.io/v1beta1
|
||||
apiVersion: apiregistration.k8s.io/v1
|
||||
kind: APIService
|
||||
metadata:
|
||||
name: v1beta1.metrics.k8s.io
|
||||
|
|
|
@ -16,7 +16,7 @@ ExecStart=/usr/bin/containerd \
|
|||
--root ${PERSISTENT_DIR}/var/lib/containerd
|
||||
TasksMax=8192
|
||||
Delegate=yes
|
||||
KillMode=process
|
||||
KillMode=mixed
|
||||
LimitNOFILE=1048576
|
||||
# Having non-zero Limit*s causes performance problems due to accounting overhead
|
||||
# in the kernel. We recommend using cgroups to do container-local accounting.
|
||||
|
|
|
@ -17,6 +17,14 @@
|
|||
# For systemd + docker configuration used below, see the following references:
|
||||
# https://systemd.io/CONTAINER_INTERFACE/
|
||||
|
||||
|
||||
# multi-tage docker build so we can build auto-pause for arm64
|
||||
FROM golang:1.16
|
||||
WORKDIR /src
|
||||
# becaue auto-pause binary depends on minikube's code we need to pass the whole source code as the context
|
||||
ADD . .
|
||||
RUN cd ./cmd/auto-pause/ && go build
|
||||
|
||||
# start from ubuntu 20.04, this image is reasonably small as a starting point
|
||||
# for a kubernetes node image, it doesn't contain much we don't need
|
||||
FROM ubuntu:focal-20210401
|
||||
|
@ -24,12 +32,11 @@ FROM ubuntu:focal-20210401
|
|||
ARG BUILDKIT_VERSION="v0.8.2"
|
||||
|
||||
# copy in static files (configs, scripts)
|
||||
COPY 10-network-security.conf /etc/sysctl.d/10-network-security.conf
|
||||
COPY 11-tcp-mtu-probing.conf /etc/sysctl.d/11-tcp-mtu-probing.conf
|
||||
COPY clean-install /usr/local/bin/clean-install
|
||||
COPY entrypoint /usr/local/bin/entrypoint
|
||||
# must first run `make deploy/kicbase/auto-pause`
|
||||
COPY auto-pause /bin/auto-pause
|
||||
COPY deploy/kicbase/10-network-security.conf /etc/sysctl.d/10-network-security.conf
|
||||
COPY deploy/kicbase/11-tcp-mtu-probing.conf /etc/sysctl.d/11-tcp-mtu-probing.conf
|
||||
COPY deploy/kicbase/clean-install /usr/local/bin/clean-install
|
||||
COPY deploy/kicbase/entrypoint /usr/local/bin/entrypoint
|
||||
COPY --from=0 /src/cmd/auto-pause/auto-pause /bin/auto-pause
|
||||
|
||||
# Install dependencies, first from apt, then from release tarballs.
|
||||
# NOTE: we use one RUN to minimize layers.
|
||||
|
@ -152,14 +159,14 @@ RUN sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/lib
|
|||
systemd-tmpfiles --create
|
||||
|
||||
# automount service
|
||||
COPY automount/minikube-automount /usr/sbin/minikube-automount
|
||||
COPY automount/minikube-automount.service /usr/lib/systemd/system/minikube-automount.service
|
||||
COPY deploy/kicbase/automount/minikube-automount /usr/sbin/minikube-automount
|
||||
COPY deploy/kicbase/automount/minikube-automount.service /usr/lib/systemd/system/minikube-automount.service
|
||||
RUN ln -fs /usr/lib/systemd/system/minikube-automount.service \
|
||||
/etc/systemd/system/multi-user.target.wants/minikube-automount.service
|
||||
|
||||
# scheduled stop service
|
||||
COPY scheduled-stop/minikube-scheduled-stop /var/lib/minikube/scheduled-stop/minikube-scheduled-stop
|
||||
COPY scheduled-stop/minikube-scheduled-stop.service /usr/lib/systemd/system/minikube-scheduled-stop.service
|
||||
COPY deploy/kicbase/scheduled-stop/minikube-scheduled-stop /var/lib/minikube/scheduled-stop/minikube-scheduled-stop
|
||||
COPY deploy/kicbase/scheduled-stop/minikube-scheduled-stop.service /usr/lib/systemd/system/minikube-scheduled-stop.service
|
||||
RUN chmod +x /var/lib/minikube/scheduled-stop/minikube-scheduled-stop
|
||||
|
||||
# disable non-docker runtimes by default
|
||||
|
|
|
@ -1,5 +1,3 @@
|
|||
// +build release
|
||||
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
|
||||
|
@ -47,13 +45,30 @@ func getSHAFromURL(url string) (string, error) {
|
|||
return hex.EncodeToString(b[:]), nil
|
||||
}
|
||||
|
||||
// TestReleasesJSON checks if all *GA* releases
|
||||
// enlisted in https://storage.googleapis.com/minikube/releases.json
|
||||
// are available to download and have correct hashsum
|
||||
func TestReleasesJSON(t *testing.T) {
|
||||
releases, err := notify.GetAllVersionsFromURL(notify.GithubMinikubeReleasesURL)
|
||||
releases, err := notify.AllVersionsFromURL(notify.GithubMinikubeReleasesURL)
|
||||
if err != nil {
|
||||
t.Fatalf("Error getting releases.json: %v", err)
|
||||
}
|
||||
checkReleases(t, releases)
|
||||
}
|
||||
|
||||
for _, r := range releases {
|
||||
// TestBetaReleasesJSON checks if all *BETA* releases
|
||||
// enlisted in https://storage.googleapis.com/minikube/releases-beta.json
|
||||
// are available to download and have correct hashsum
|
||||
func TestBetaReleasesJSON(t *testing.T) {
|
||||
releases, err := notify.AllVersionsFromURL(notify.GithubMinikubeBetaReleasesURL)
|
||||
if err != nil {
|
||||
t.Fatalf("Error getting releases-bets.json: %v", err)
|
||||
}
|
||||
checkReleases(t, releases)
|
||||
}
|
||||
|
||||
func checkReleases(t *testing.T, rs notify.Releases) {
|
||||
for _, r := range rs {
|
||||
fmt.Printf("Checking release: %s\n", r.Name)
|
||||
for platform, sha := range r.Checksums {
|
||||
fmt.Printf("Checking SHA for %s.\n", platform)
|
||||
|
|
|
@ -1,5 +1,13 @@
|
|||
[
|
||||
{
|
||||
"name": "v1.22.0-beta.0",
|
||||
"checksums": {
|
||||
"darwin": "1ec06c37be5c6c79a7255da09ff83490a44d1e8cd2b2f45e4b489edfdeacde94",
|
||||
"linux": "c9d9ac605a94748379188cced6b832037b8069441744b889214990c4ca3485a5",
|
||||
"windows": "68fb9c24f0ea55b985856d0cce9fa0c288b8a4d7e13519d6f0790038165d7ef1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "v1.21.0-beta.0",
|
||||
"checksums": {
|
||||
"darwin": "69ab001eb4984d09ed731d5ac92afd8310e5c7672c2275b39d7a4c7e2dcfb4c6",
|
||||
|
@ -10,9 +18,9 @@
|
|||
{
|
||||
"name": "v1.20.0-beta.0",
|
||||
"checksums": {
|
||||
"darwin": "",
|
||||
"linux": "",
|
||||
"windows": ""
|
||||
"darwin": "686f8d7c06c93f28543f982ec56a68544ab2ad6c7f70b39ede5174d7bac29651",
|
||||
"linux": "fe0796852c9ef266597fc93fa4b7a88d2cab9ba7008f0e9f644b633c51d269a1",
|
||||
"windows": "84a0686c90ab88d04a0aab57b8cadacf9197d3ea6b467f9f807d071efe7fad3c"
|
||||
}
|
||||
}
|
||||
]
|
||||
|
|
|
@ -1,4 +1,12 @@
|
|||
[
|
||||
{
|
||||
"name": "v1.21.0",
|
||||
"checksums": {
|
||||
"darwin": "e2043883ca993b2a65396d379823dab6404dd842d0cc2a81348d247b01785070",
|
||||
"linux": "5d423a00a24fdfbb95627a3fadbf58540fc4463be2338619257c529f93cf061b",
|
||||
"windows": "74c961877798531ab8e53e2590bfae3cee7690d0c2e0614fdb44339e065124b5"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "v1.20.0",
|
||||
"checksums": {
|
||||
|
|
|
@ -0,0 +1,205 @@
|
|||
# Periodically tell user about minikube features/tips and tricks
|
||||
|
||||
* First proposed: 2021-06-18
|
||||
* Authors: Peixuan Ding (@dinever)
|
||||
|
||||
## Reviewer Priorities
|
||||
|
||||
Please review this proposal with the following priorities:
|
||||
|
||||
* Does this fit with minikube's [principles](https://minikube.sigs.k8s.io/docs/concepts/principles/)?
|
||||
* Are there other approaches to consider?
|
||||
* Could the implementation be made simpler?
|
||||
* Are there usability, reliability, or technical debt concerns?
|
||||
|
||||
Please leave the above text in your proposal as instructions to the reader.
|
||||
|
||||
## Summary
|
||||
|
||||
minikube has lots of great features. We want to proactively remind users that
|
||||
those features are available.
|
||||
|
||||
To achieve this, we can have a tips feature that randomly shows a tip
|
||||
from a curated list whenever the user starts a new minikube profile.
|
||||
|
||||
For example:
|
||||
|
||||

|
||||
|
||||
## Goals
|
||||
|
||||
* Store a list of tips in a static file
|
||||
* Show a random minikube usage tip each time a user starts a minikube profile
|
||||
* Have the tips synced to the Hugo docs website to make those available through docs
|
||||
* Allow user to disable the Tips feature with minikube config
|
||||
|
||||
## Non-Goals
|
||||
|
||||
* Modify any existing functionalities or docs
|
||||
|
||||
## Design Details
|
||||
|
||||
First, we need a static file to store all the tips, we can have a YAML file at [pkg/generate/tips/tips.yaml](https://github.com/kubernetes/minikube/tree/master/pkg/generate):
|
||||
|
||||
```YAML
|
||||
tips:
|
||||
- |
|
||||
You can specify any Kubernetes version you want. For example:
|
||||
|
||||
```
|
||||
minikube start --kubernetes-version=v1.19.0
|
||||
```
|
||||
- |
|
||||
You can use minikube's built-in kubectl. For example:
|
||||
|
||||
```
|
||||
minikube kubectl -- get pods
|
||||
```
|
||||
- |
|
||||
minikube has the built-in Kubernetes Dashboard UI. To access it:
|
||||
|
||||
```
|
||||
minikube dashboard
|
||||
```
|
||||
```
|
||||
|
||||
Use `goembed` to embed this file into the minikube binary.
|
||||
|
||||
The current `out.Boxed` has a hard-coded style (red). I propose to add another `out.BoxedWithConfig` method to allow
|
||||
output with customized style:
|
||||
|
||||
```go
|
||||
// BoxedWithConfig writes a templated message in a box with customized style config to stdout
|
||||
func BoxedWithConfig(cfg box.Config, st style.Enum, title string, format string, a ...V) {
|
||||
}
|
||||
```
|
||||
|
||||
Whenever minikube successfully starts, we randomly choose a tip.
|
||||
|
||||
Before printing it out, we need to do some regex replacement to strip the markdown syntax
|
||||
for better view experience in Terminal:
|
||||
|
||||
From this:
|
||||
|
||||
``````markdown
|
||||
You can specify any Kubernetes version you want. For example:
|
||||
|
||||
```
|
||||
minikube start --kubernetes-version=v1.19.0
|
||||
```
|
||||
``````
|
||||
|
||||
To this:
|
||||
|
||||
```markdown
|
||||
You can specify any Kubernetes version you want. For example:
|
||||
|
||||
minikube start --kubernetes-version=v1.19.0
|
||||
```
|
||||
|
||||
Then we can print out the tip:
|
||||
|
||||
|
||||
```go
|
||||
boxCfg := out.BoxConfig{
|
||||
Config: box.Config{
|
||||
Py: 1,
|
||||
Px: 5,
|
||||
TitlePos: "Top",
|
||||
Type: "Round",
|
||||
Color: tipBoxColor,
|
||||
},
|
||||
Title: tipTitle,
|
||||
Icon: style.Tip,
|
||||
}
|
||||
|
||||
out.BoxedWithConfig(boxCfg, tips.Tips[chosen] + "\n\n" + tipSuffix)
|
||||
```
|
||||
|
||||

|
||||
|
||||
User can choose to disable this through `minikube config set disable-tips true`
|
||||
|
||||
We will have `make generate-docs` generating the docs site based on this YAML file as well.
|
||||
|
||||
We can have a `Nice to know` sub-page under `FAQ`?
|
||||
|
||||

|
||||
|
||||
|
||||
### About the tip collection
|
||||
|
||||
I plan to start with the command lines and cover almost all CLI usages of minikube.
|
||||
|
||||
That includes but not limited to:
|
||||
- addons
|
||||
- cached images
|
||||
- command line completion
|
||||
- config
|
||||
- file copy
|
||||
- dashboard
|
||||
- delete minikube cluster
|
||||
- configure minikube's docker/podman env
|
||||
- image build / load / ls / rm
|
||||
- ip
|
||||
- logging
|
||||
- kubectl
|
||||
- mount file directory
|
||||
- multi-node
|
||||
- pause/unpause to save resource
|
||||
- multi-profile
|
||||
- surface URL to a k8s service
|
||||
- ssh into minikube
|
||||
- status
|
||||
- tunnel to connect to LB
|
||||
- update-check to check versions
|
||||
- update-context
|
||||
|
||||
### Implementation
|
||||
|
||||
I plan to open at least 4 PRs:
|
||||
|
||||
1. `out.Boxed` with custom style
|
||||
2. random `tips` display with ability to disable through config, with an initial set of about 10 tips
|
||||
3. `make generate-docs` to sync tips to docs
|
||||
4. Add more tips
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
1. Is there a more preferred file format to YAML?
|
||||
|
||||
2. Maybe we just want to sync the tips to the `FAQ` page list instead of creating a new page?
|
||||
|
||||
3. Instead of the file format I proposed, maybe add a `question` field?
|
||||
|
||||
```yaml
|
||||
tips:
|
||||
- question: How to specify a different Kubernetes version?
|
||||
answer: |
|
||||
You can specify any Kubernetes version you want. For example:
|
||||
|
||||
```
|
||||
minikube start --kubernetes-version=v1.19.0
|
||||
```
|
||||
- question: Do I have to install `kubectl` myself?
|
||||
answer: |
|
||||
You can use minikube's built-in kubectl. For example:
|
||||
|
||||
```
|
||||
minikube kubectl -- get pods
|
||||
```
|
||||
- question: How do I access the Kubernetes Dashboard UI?
|
||||
answer: |
|
||||
minikube has the built-in Kubernetes Dashboard UI. To access it:
|
||||
|
||||
```
|
||||
minikube dashboard
|
||||
```
|
||||
```
|
||||
|
||||
On the docs side we can show both questions and answers. On the CLI side
|
||||
we can either show both questions and answers, or just show the answers
|
||||
to make it more compact.
|
||||
|
||||

|
||||
|
99
go.mod
99
go.mod
|
@ -5,35 +5,32 @@ go 1.16
|
|||
require (
|
||||
cloud.google.com/go/storage v1.15.0
|
||||
contrib.go.opencensus.io/exporter/stackdriver v0.12.1
|
||||
github.com/Azure/azure-sdk-for-go v43.3.0+incompatible
|
||||
github.com/Delta456/box-cli-maker/v2 v2.2.1
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v0.16.0
|
||||
github.com/Microsoft/hcsshim v0.8.15 // indirect
|
||||
github.com/Microsoft/hcsshim v0.8.17 // indirect
|
||||
github.com/Parallels/docker-machine-parallels/v2 v2.0.1
|
||||
github.com/VividCortex/godaemon v0.0.0-20201030160542-15e3f4925a21
|
||||
github.com/blang/semver v3.5.1+incompatible
|
||||
github.com/VividCortex/godaemon v1.0.0
|
||||
github.com/blang/semver/v4 v4.0.0
|
||||
github.com/briandowns/spinner v1.11.1
|
||||
github.com/c4milo/gotoolkit v0.0.0-20170318115440-bcc06269efa9 // indirect
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible
|
||||
github.com/c4milo/gotoolkit v0.0.0-20190525173301-67483a18c17a // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.1.1
|
||||
github.com/cheggaaa/pb/v3 v3.0.8
|
||||
github.com/cloudevents/sdk-go/v2 v2.3.1
|
||||
github.com/cloudfoundry-attic/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21
|
||||
github.com/cloudfoundry/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21 // indirect
|
||||
github.com/docker/cli v0.0.0-20200303162255-7d407207c304 // indirect
|
||||
github.com/docker/docker v17.12.0-ce-rc1.0.20210128214336-420b1d36250f+incompatible
|
||||
github.com/docker/docker v20.10.7+incompatible
|
||||
github.com/docker/go-units v0.4.0
|
||||
github.com/docker/machine v0.16.2
|
||||
github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f
|
||||
github.com/elazarl/goproxy/ext v0.0.0-20190421051319-9d40249d3c2f // indirect
|
||||
github.com/elazarl/goproxy v0.0.0-20210110162100-a92cc753f88e
|
||||
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3
|
||||
github.com/google/go-cmp v0.5.6
|
||||
github.com/google/go-containerregistry v0.4.1
|
||||
github.com/google/go-github v17.0.0+incompatible
|
||||
github.com/google/go-github/v32 v32.1.0
|
||||
github.com/google/slowjam v0.0.0-20200530021616-df27e642fe7b
|
||||
github.com/google/go-github/v36 v36.0.0
|
||||
github.com/google/slowjam v1.0.0
|
||||
github.com/google/uuid v1.2.0
|
||||
github.com/hashicorp/go-getter v1.5.2
|
||||
github.com/gookit/color v1.4.2 // indirect
|
||||
github.com/hashicorp/go-getter v1.5.4
|
||||
github.com/hashicorp/go-retryablehttp v0.7.0
|
||||
github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect
|
||||
github.com/hooklift/assert v0.0.0-20170704181755-9d1defd6d214 // indirect
|
||||
|
@ -55,8 +52,10 @@ require (
|
|||
github.com/machine-drivers/docker-machine-driver-vmware v0.1.3
|
||||
github.com/mattbaird/jsonpatch v0.0.0-20200820163806-098863c1fc24
|
||||
github.com/mattn/go-isatty v0.0.13
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/mitchellh/go-ps v1.0.0
|
||||
github.com/moby/hyperkit v0.0.0-20210108224842-2f061e447e14
|
||||
github.com/moby/sys/mount v0.2.0 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.5
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/otiai10/copy v1.6.0
|
||||
|
@ -71,7 +70,7 @@ require (
|
|||
github.com/shirou/gopsutil/v3 v3.21.5
|
||||
github.com/spf13/cobra v1.1.3
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/spf13/viper v1.7.1
|
||||
github.com/spf13/viper v1.8.1
|
||||
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f
|
||||
github.com/zchee/go-vmnet v0.0.0-20161021174912-97ebf9174097
|
||||
go.opencensus.io v0.23.0
|
||||
|
@ -79,59 +78,55 @@ require (
|
|||
go.opentelemetry.io/otel/sdk v0.16.0
|
||||
go.opentelemetry.io/otel/trace v0.17.0
|
||||
golang.org/x/build v0.0.0-20190927031335-2835ba2e683f
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
|
||||
golang.org/x/exp v0.0.0-20210220032938-85be41e4509f
|
||||
golang.org/x/mod v0.4.2
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c
|
||||
golang.org/x/oauth2 v0.0.0-20210615190721-d04028783cf1
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015
|
||||
golang.org/x/sys v0.0.0-20210629170331-7dc0b73dc9fb
|
||||
golang.org/x/term v0.0.0-20210406210042-72f3dc4e9b72
|
||||
golang.org/x/text v0.3.6
|
||||
gonum.org/v1/plot v0.9.0
|
||||
google.golang.org/api v0.47.0
|
||||
google.golang.org/api v0.49.0
|
||||
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gotest.tools/v3 v3.0.3 // indirect
|
||||
k8s.io/api v0.20.5
|
||||
k8s.io/apimachinery v0.20.5
|
||||
k8s.io/client-go v0.20.5
|
||||
k8s.io/api v0.21.2
|
||||
k8s.io/apimachinery v0.21.2
|
||||
k8s.io/client-go v0.21.2
|
||||
k8s.io/klog/v2 v2.9.0
|
||||
k8s.io/kubectl v0.0.0
|
||||
k8s.io/kubernetes v1.20.5
|
||||
k8s.io/kubectl v0.21.2
|
||||
k8s.io/kubernetes v1.21.2
|
||||
sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.3.0
|
||||
)
|
||||
|
||||
replace (
|
||||
git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110319-2566ecd5d999
|
||||
github.com/briandowns/spinner => github.com/alonyb/spinner v1.12.7
|
||||
github.com/docker/docker => github.com/afbjorklund/moby v0.0.0-20210308214533-2fa72faf0e8b
|
||||
github.com/docker/machine => github.com/machine-drivers/machine v0.7.1-0.20210306082426-fcb2ad5bcb17
|
||||
github.com/google/go-containerregistry => github.com/afbjorklund/go-containerregistry v0.4.1-0.20210321165649-761f6f9626b1
|
||||
github.com/samalba/dockerclient => github.com/sayboras/dockerclient v1.0.0
|
||||
k8s.io/api => k8s.io/api v0.20.5
|
||||
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.5
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.20.5
|
||||
k8s.io/apiserver => k8s.io/apiserver v0.20.5
|
||||
k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.5
|
||||
k8s.io/client-go => k8s.io/client-go v0.20.5
|
||||
k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.5
|
||||
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.5
|
||||
k8s.io/code-generator => k8s.io/code-generator v0.20.5
|
||||
k8s.io/component-base => k8s.io/component-base v0.20.5
|
||||
k8s.io/component-helpers => k8s.io/component-helpers v0.20.5
|
||||
k8s.io/controller-manager => k8s.io/controller-manager v0.20.5
|
||||
k8s.io/cri-api => k8s.io/cri-api v0.20.5
|
||||
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.5
|
||||
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.5
|
||||
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.5
|
||||
k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.5
|
||||
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.5
|
||||
k8s.io/kubectl => k8s.io/kubectl v0.20.5
|
||||
k8s.io/kubelet => k8s.io/kubelet v0.20.5
|
||||
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.5
|
||||
k8s.io/metrics => k8s.io/metrics v0.20.5
|
||||
k8s.io/mount-utils => k8s.io/mount-utils v0.20.5
|
||||
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.5
|
||||
k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.20.5
|
||||
k8s.io/sample-controller => k8s.io/sample-controller v0.20.5
|
||||
k8s.io/api => k8s.io/api v0.21.2
|
||||
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.21.2
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.21.2
|
||||
k8s.io/apiserver => k8s.io/apiserver v0.21.2
|
||||
k8s.io/cli-runtime => k8s.io/cli-runtime v0.21.2
|
||||
k8s.io/client-go => k8s.io/client-go v0.21.2
|
||||
k8s.io/cloud-provider => k8s.io/cloud-provider v0.21.2
|
||||
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.21.2
|
||||
k8s.io/code-generator => k8s.io/code-generator v0.21.2
|
||||
k8s.io/component-base => k8s.io/component-base v0.21.2
|
||||
k8s.io/component-helpers => k8s.io/component-helpers v0.21.2
|
||||
k8s.io/controller-manager => k8s.io/controller-manager v0.21.2
|
||||
k8s.io/cri-api => k8s.io/cri-api v0.21.2
|
||||
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.21.2
|
||||
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.21.2
|
||||
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.21.2
|
||||
k8s.io/kube-proxy => k8s.io/kube-proxy v0.21.2
|
||||
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.21.2
|
||||
k8s.io/kubectl => k8s.io/kubectl v0.21.2
|
||||
k8s.io/kubelet => k8s.io/kubelet v0.21.2
|
||||
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.21.2
|
||||
k8s.io/metrics => k8s.io/metrics v0.21.2
|
||||
k8s.io/mount-utils => k8s.io/mount-utils v0.21.2
|
||||
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.21.2
|
||||
)
|
||||
|
|
|
@ -17,9 +17,9 @@
|
|||
set -e
|
||||
|
||||
install_kind() {
|
||||
curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.11.0/kind-linux-amd64
|
||||
curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/latest/download/kind-linux-amd64
|
||||
chmod +x ./kind
|
||||
sudo mv ./kind /usr/local
|
||||
sudo mv ./kind /usr/local/bin/kind
|
||||
}
|
||||
|
||||
install_k3d() {
|
||||
|
@ -31,31 +31,58 @@ install_minikube() {
|
|||
sudo install ./out/minikube /usr/local/bin/minikube
|
||||
}
|
||||
|
||||
install_gh() {
|
||||
export access_token="$1"
|
||||
|
||||
# Make sure gh is installed and configured
|
||||
./hack/jenkins/installers/check_install_gh.sh
|
||||
}
|
||||
|
||||
config_git() {
|
||||
git config user.name "minikube-bot"
|
||||
git config user.email "minikube-bot@google.com"
|
||||
}
|
||||
|
||||
create_branch() {
|
||||
git checkout -b addTimeToK8s"$1"
|
||||
}
|
||||
|
||||
run_benchmark() {
|
||||
( cd ./hack/benchmark/time-to-k8s/time-to-k8s/ &&
|
||||
pwd
|
||||
( cd ./hack/benchmark/time-to-k8s/time-to-k8s-repo/ &&
|
||||
git submodule update --init &&
|
||||
go run . --config local-kubernetes.yaml --iterations 5 --output output.csv )
|
||||
go run . --config local-kubernetes.yaml --iterations 10 --output output.csv )
|
||||
}
|
||||
|
||||
generate_chart() {
|
||||
go run ./hack/benchmark/time-to-k8s/chart.go --csv ./hack/benchmark/time-to-k8s/time-to-k8s/output.csv --output ./site/static/images/benchmarks/timeToK8s/"$1".png
|
||||
go run ./hack/benchmark/time-to-k8s/chart.go --csv ./hack/benchmark/time-to-k8s/time-to-k8s-repo/output.csv --output ./site/static/images/benchmarks/timeToK8s/"$1".png
|
||||
}
|
||||
|
||||
create_page() {
|
||||
printf -- "---\ntitle: \"%s Benchmark\"\nlinkTitle: \"%s Benchmark\"\nweight: 1\n---\n\n\n" "$1" "$1" "$1" > ./site/content/en/docs/benchmarks/timeToK8s/"$1".md
|
||||
}
|
||||
|
||||
commit_chart() {
|
||||
commit_changes() {
|
||||
git add ./site/static/images/benchmarks/timeToK8s/"$1".png ./site/content/en/docs/benchmarks/timeToK8s/"$1".md
|
||||
git commit -m 'update time-to-k8s chart'
|
||||
git commit -m "add time-to-k8s benchmark for $1"
|
||||
}
|
||||
|
||||
create_pr() {
|
||||
git remote add minikube-bot https://minikube-bot:"$2"@github.com/minikube-bot/minikube.git
|
||||
git push -u minikube-bot addTimeToK8s"$1"
|
||||
gh pr create --repo kubernetes/minikube --base master --title "Add time-to-k8s benchmark for $1" --body "Updating time-to-k8s benchmark as part of the release process"
|
||||
}
|
||||
|
||||
install_kind
|
||||
install_k3d
|
||||
install_minikube
|
||||
VERSION=$(minikube version --short)
|
||||
install_gh "$1"
|
||||
config_git
|
||||
|
||||
VERSION=$(minikube version --short)
|
||||
create_branch "$VERSION"
|
||||
run_benchmark
|
||||
generate_chart "$VERSION"
|
||||
create_page "$VERSION"
|
||||
commit_chart "$VERSION"
|
||||
commit_changes "$VERSION"
|
||||
create_pr "$VERSION" "$1"
|
||||
|
|
|
@ -45,16 +45,11 @@ make release-iso | tee iso-logs.txt
|
|||
ec=$?
|
||||
if [ $ec -gt 0 ]; then
|
||||
if [ "$release" = false ]; then
|
||||
err=$(tail -100 iso-logs.txt)
|
||||
gh pr comment ${ghprbPullId} --body "Hi ${ghprbPullAuthorLoginMention}, building a new ISO failed, with the error below:
|
||||
|
||||
<details>
|
||||
<pre>
|
||||
${err}
|
||||
</pre>
|
||||
</details>
|
||||
|
||||
Full logs are at https://storage.cloud.google.com/minikube-builds/logs/${ghprbPullId}/${ghprbActualCommit:0:7}/iso_build.txt
|
||||
gh pr comment ${ghprbPullId} --body "Hi ${ghprbPullAuthorLoginMention}, building a new ISO failed.
|
||||
See the logs at:
|
||||
```
|
||||
https://storage.cloud.google.com/minikube-builds/logs/${ghprbPullId}/${ghprbActualCommit:0:7}/iso_build.txt
|
||||
```
|
||||
"
|
||||
fi
|
||||
exit $ec
|
||||
|
|
|
@ -419,7 +419,7 @@ fi
|
|||
|
||||
touch "${HTML_OUT}"
|
||||
touch "${SUMMARY_OUT}"
|
||||
gopogh_status=$(gopogh -in "${JSON_OUT}" -out_html "${HTML_OUT}" -out_summary "${SUMMARY_OUT}" -name "${JOB_NAME}" -pr "${MINIKUBE_LOCATION}" -repo github.com/kubernetes/minikube/ -details "${COMMIT}") || true
|
||||
gopogh_status=$(gopogh -in "${JSON_OUT}" -out_html "${HTML_OUT}" -out_summary "${SUMMARY_OUT}" -name "${JOB_NAME}" -pr "${MINIKUBE_LOCATION}" -repo github.com/kubernetes/minikube/ -details "${COMMIT}:$(date +%Y-%m-%d)") || true
|
||||
fail_num=$(echo $gopogh_status | jq '.NumberOfFail')
|
||||
test_num=$(echo $gopogh_status | jq '.NumberOfTests')
|
||||
pessimistic_status="${fail_num} / ${test_num} failures"
|
||||
|
@ -429,17 +429,27 @@ if [ "$status" = "failure" ]; then
|
|||
fi
|
||||
echo "$description"
|
||||
|
||||
REPORT_URL_BASE="https://storage.googleapis.com"
|
||||
|
||||
if [ -z "${EXTERNAL}" ]; then
|
||||
# If we're already in GCP, then upload results to GCS directly
|
||||
SHORT_COMMIT=${COMMIT:0:7}
|
||||
JOB_GCS_BUCKET="minikube-builds/logs/${MINIKUBE_LOCATION}/${SHORT_COMMIT}/${JOB_NAME}"
|
||||
echo ">> Copying ${TEST_OUT} to gs://${JOB_GCS_BUCKET}out.txt"
|
||||
gsutil -qm cp "${TEST_OUT}" "gs://${JOB_GCS_BUCKET}out.txt"
|
||||
echo ">> uploading ${JSON_OUT}"
|
||||
|
||||
echo ">> Copying ${TEST_OUT} to gs://${JOB_GCS_BUCKET}.out.txt"
|
||||
echo ">> public URL: ${REPORT_URL_BASE}/${JOB_GCS_BUCKET}.out.txt"
|
||||
gsutil -qm cp "${TEST_OUT}" "gs://${JOB_GCS_BUCKET}.out.txt"
|
||||
|
||||
echo ">> uploading ${JSON_OUT} to gs://${JOB_GCS_BUCKET}.json"
|
||||
echo ">> public URL: ${REPORT_URL_BASE}/${JOB_GCS_BUCKET}.json"
|
||||
gsutil -qm cp "${JSON_OUT}" "gs://${JOB_GCS_BUCKET}.json" || true
|
||||
echo ">> uploading ${HTML_OUT}"
|
||||
|
||||
echo ">> uploading ${HTML_OUT} to ${REPORT_URL_BASE}/${JOB_GCS_BUCKET}.html"
|
||||
echo ">> public URL: ${REPORT_URL_BASE}/${JOB_GCS_BUCKET}.html"
|
||||
gsutil -qm cp "${HTML_OUT}" "gs://${JOB_GCS_BUCKET}.html" || true
|
||||
echo ">> uploading ${SUMMARY_OUT}"
|
||||
|
||||
echo ">> uploading ${SUMMARY_OUT} to ${REPORT_URL_BASE}/${JOB_GCS_BUCKET}_summary.json"
|
||||
echo ">> public URL: ${REPORT_URL_BASE}/${JOB_GCS_BUCKET}_summary.json"
|
||||
gsutil -qm cp "${SUMMARY_OUT}" "gs://${JOB_GCS_BUCKET}_summary.json" || true
|
||||
else
|
||||
# Otherwise, put the results in a predictable spot so the upload job can find them
|
||||
|
|
|
@ -67,16 +67,11 @@ CIBUILD=yes make push-kic-base-image | tee kic-logs.txt
|
|||
ec=$?
|
||||
if [ $ec -gt 0 ]; then
|
||||
if [ "$release" = false ]; then
|
||||
err=$(tail -100 kic-logs.txt)
|
||||
gh pr comment ${ghprbPullId} --body "Hi ${ghprbPullAuthorLoginMention}, building a new kicbase image failed, with the error below:
|
||||
|
||||
<details>
|
||||
<pre>
|
||||
${err}
|
||||
</pre>
|
||||
</details>
|
||||
|
||||
Full logs are at https://storage.cloud.google.com/minikube-builds/logs/${ghprbPullId}/${ghprbActualCommit:0:7}/kic_image_build.txt
|
||||
gh pr comment ${ghprbPullId} --body "Hi ${ghprbPullAuthorLoginMention}, building a new kicbase image failed.
|
||||
See the logs at:
|
||||
```
|
||||
https://storage.cloud.google.com/minikube-builds/logs/${ghprbPullId}/${ghprbActualCommit:0:7}/kic_image_build.txt
|
||||
```
|
||||
"
|
||||
fi
|
||||
exit $ec
|
||||
|
|
|
@ -46,6 +46,8 @@ make -j 16 \
|
|||
out/minikube_${DEB_VER}_amd64.deb \
|
||||
out/minikube_${DEB_VER}_arm64.deb \
|
||||
out/docker-machine-driver-kvm2_$(make deb_version_base).deb \
|
||||
out/docker-machine-driver-kvm2_${DEB_VER}_amd64.deb \
|
||||
out/docker-machine-driver-kvm2_${DEB_VER}_arm64.deb \
|
||||
&& failed=$? || failed=$?
|
||||
|
||||
BUILT_VERSION=$("out/minikube-$(go env GOOS)-$(go env GOARCH)" version)
|
||||
|
@ -70,7 +72,7 @@ fi
|
|||
cp -r test/integration/testdata out/
|
||||
|
||||
# Don't upload the buildroot artifacts if they exist
|
||||
rm -r out/buildroot || true
|
||||
rm -rf out/buildroot
|
||||
|
||||
# At this point, the out directory contains the jenkins scripts (populated by jenkins),
|
||||
# testdata, and our build output. Push the changes to GCS so that worker nodes can re-use them.
|
||||
|
|
|
@ -94,3 +94,5 @@ for j in ${jobs[@]}; do
|
|||
"https://storage.googleapis.com/minikube-builds/logs/${ghprbPullId}/${SHORT_COMMIT}/${j}.pending"
|
||||
done
|
||||
|
||||
STARTED_LIST_REMOTE="gs://minikube-builds/logs/${ghprbPullId}/${SHORT_COMMIT}/started_environments_${BUILD_NUMBER}.txt"
|
||||
printf "%s\n" "${jobs[@]}" | gsutil cp - "${STARTED_LIST_REMOTE}"
|
||||
|
|
|
@ -34,13 +34,6 @@ EXTRA_TEST_ARGS=""
|
|||
EXPECTED_DEFAULT_DRIVER="docker"
|
||||
EXTERNAL="yes"
|
||||
|
||||
# fix mac os as a service on mac os
|
||||
# https://github.com/docker/for-mac/issues/882#issuecomment-506372814
|
||||
#osascript -e 'quit app "Docker"'
|
||||
#/Applications/Docker.app/Contents/MacOS/Docker --quit-after-install --unattended || true
|
||||
#osascript -e 'quit app "Docker"'
|
||||
#/Applications/Docker.app/Contents/MacOS/Docker --unattended &
|
||||
|
||||
begin=$(date +%s)
|
||||
while [ -z "$(docker info 2> /dev/null )" ];
|
||||
do
|
||||
|
|
|
@ -64,6 +64,7 @@ env BUILD_IN_DOCKER=y \
|
|||
"out/minikube-${RPM_VERSION}-${RPM_REVISION}.ppc64le.rpm" \
|
||||
"out/minikube-${RPM_VERSION}-${RPM_REVISION}.s390x.rpm" \
|
||||
"out/docker-machine-driver-kvm2_${DEB_VERSION}-${DEB_REVISION}_amd64.deb" \
|
||||
"out/docker-machine-driver-kvm2_${DEB_VERSION}-${DEB_REVISION}_arm64.deb" \
|
||||
"out/docker-machine-driver-kvm2-${RPM_VERSION}-${RPM_REVISION}.x86_64.rpm"
|
||||
|
||||
# check if 'commit: <commit-id>' line contains '-dirty' commit suffix
|
||||
|
|
|
@ -0,0 +1,31 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2021 The Kubernetes Authors All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Collects all test data manually, processes it, and uploads to GCS. This will
|
||||
# overwrite any existing data. This should only be done for a dryrun, new data
|
||||
# should be handled exclusively through upload_tests.sh.
|
||||
# Example usage: ./collect_data_manual.sh
|
||||
|
||||
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
|
||||
|
||||
# 1) "cat" together all summary files.
|
||||
# 2) Process all summary files.
|
||||
# 3) Optimize the resulting data.
|
||||
# 4) Store in GCS bucket.
|
||||
gsutil cat gs://minikube-builds/logs/master/*/*_summary.json \
|
||||
| $DIR/process_data.sh \
|
||||
| $DIR/optimize_data.sh \
|
||||
| gsutil cp - gs://minikube-flake-rate/data.csv
|
|
@ -0,0 +1,264 @@
|
|||
/*
|
||||
Copyright 2021 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"runtime/debug"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
dataCsv = flag.String("data-csv", "", "Source data to compute flake rates on")
|
||||
dateRange = flag.Uint("date-range", 5, "Number of test dates to consider when computing flake rate")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
file, err := os.Open(*dataCsv)
|
||||
if err != nil {
|
||||
exit("Unable to read data CSV", err)
|
||||
}
|
||||
|
||||
testEntries := readData(file)
|
||||
splitEntries := splitData(testEntries)
|
||||
filteredEntries := filterRecentEntries(splitEntries, *dateRange)
|
||||
flakeRates := computeFlakeRates(filteredEntries)
|
||||
averageDurations := computeAverageDurations(filteredEntries)
|
||||
fmt.Println("Environment,Test,Flake Rate,Duration")
|
||||
for environment, environmentSplit := range flakeRates {
|
||||
for test, flakeRate := range environmentSplit {
|
||||
duration := averageDurations[environment][test]
|
||||
fmt.Printf("%s,%s,%.2f,%.3f\n", environment, test, flakeRate*100, duration)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// One entry of a test run.
|
||||
// Example: TestEntry {
|
||||
// name: "TestFunctional/parallel/LogsCmd",
|
||||
// environment: "Docker_Linux",
|
||||
// date: time.Now,
|
||||
// status: "Passed",
|
||||
// duration: 0.1,
|
||||
// }
|
||||
type testEntry struct {
|
||||
name string
|
||||
environment string
|
||||
date time.Time
|
||||
status string
|
||||
duration float32
|
||||
}
|
||||
|
||||
// A map with keys of (environment, test_name) to values of slcies of TestEntry.
|
||||
type splitEntryMap map[string]map[string][]testEntry
|
||||
|
||||
// Reads CSV `file` and consumes each line to be a single TestEntry.
|
||||
func readData(file io.Reader) []testEntry {
|
||||
testEntries := []testEntry{}
|
||||
|
||||
fileReader := bufio.NewReaderSize(file, 256)
|
||||
previousLine := []string{"", "", "", "", "", ""}
|
||||
firstLine := true
|
||||
for {
|
||||
lineBytes, _, err := fileReader.ReadLine()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
exit("Error reading data CSV", err)
|
||||
}
|
||||
line := string(lineBytes)
|
||||
fields := strings.Split(line, ",")
|
||||
if firstLine {
|
||||
if len(fields) != 6 {
|
||||
exit(fmt.Sprintf("Data CSV in incorrect format. Expected 6 columns, but got %d", len(fields)), fmt.Errorf("bad CSV format"))
|
||||
}
|
||||
firstLine = false
|
||||
}
|
||||
for i, field := range fields {
|
||||
if field == "" {
|
||||
fields[i] = previousLine[i]
|
||||
}
|
||||
}
|
||||
if len(fields) != 6 {
|
||||
fmt.Printf("Found line with wrong number of columns. Expectd 6, but got %d - skipping\n", len(fields))
|
||||
continue
|
||||
}
|
||||
previousLine = fields
|
||||
if fields[4] == "Passed" || fields[4] == "Failed" {
|
||||
date, err := time.Parse("2006-01-02", fields[1])
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to parse date: %v\n", err)
|
||||
continue
|
||||
}
|
||||
duration, err := strconv.ParseFloat(fields[5], 32)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to parse duration: %v\n", err)
|
||||
continue
|
||||
}
|
||||
testEntries = append(testEntries, testEntry{
|
||||
name: fields[3],
|
||||
environment: fields[2],
|
||||
date: date,
|
||||
status: fields[4],
|
||||
duration: float32(duration),
|
||||
})
|
||||
}
|
||||
}
|
||||
return testEntries
|
||||
}
|
||||
|
||||
// Splits `testEntries` up into maps indexed first by environment and then by test.
|
||||
func splitData(testEntries []testEntry) splitEntryMap {
|
||||
splitEntries := make(splitEntryMap)
|
||||
|
||||
for _, entry := range testEntries {
|
||||
appendEntry(splitEntries, entry.environment, entry.name, entry)
|
||||
}
|
||||
|
||||
return splitEntries
|
||||
}
|
||||
|
||||
// Appends `entry` to `splitEntries` at the `environment` and `test`.
|
||||
func appendEntry(splitEntries splitEntryMap, environment, test string, entry testEntry) {
|
||||
// Lookup the environment.
|
||||
environmentSplit, ok := splitEntries[environment]
|
||||
if !ok {
|
||||
// If the environment map is missing, make a map for this environment and store it.
|
||||
environmentSplit = make(map[string][]testEntry)
|
||||
splitEntries[environment] = environmentSplit
|
||||
}
|
||||
|
||||
// Lookup the test.
|
||||
testSplit, ok := environmentSplit[test]
|
||||
if !ok {
|
||||
// If the test is missing, make a slice for this test.
|
||||
testSplit = make([]testEntry, 0)
|
||||
// The slice is not inserted, since it will be replaced anyway.
|
||||
}
|
||||
environmentSplit[test] = append(testSplit, entry)
|
||||
}
|
||||
|
||||
// Filters `splitEntries` to include only the most recent `date_range` dates.
|
||||
func filterRecentEntries(splitEntries splitEntryMap, dateRange uint) splitEntryMap {
|
||||
filteredEntries := make(splitEntryMap)
|
||||
|
||||
for environment, environmentSplit := range splitEntries {
|
||||
for test, testSplit := range environmentSplit {
|
||||
dates := make([]time.Time, len(testSplit))
|
||||
for _, entry := range testSplit {
|
||||
dates = append(dates, entry.date)
|
||||
}
|
||||
// Sort dates from future to past.
|
||||
sort.Slice(dates, func(i, j int) bool {
|
||||
return dates[j].Before(dates[i])
|
||||
})
|
||||
datesInRange := make([]time.Time, 0, dateRange)
|
||||
var lastDate time.Time
|
||||
// Go through each date.
|
||||
for _, date := range dates {
|
||||
// If date is the same as last date, ignore it.
|
||||
if date.Equal(lastDate) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Add the date.
|
||||
datesInRange = append(datesInRange, date)
|
||||
lastDate = date
|
||||
// If the date_range has been hit, break out.
|
||||
if uint(len(datesInRange)) == dateRange {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for _, entry := range testSplit {
|
||||
// Look for the first element <= entry.date
|
||||
index := sort.Search(len(datesInRange), func(i int) bool {
|
||||
return !datesInRange[i].After(entry.date)
|
||||
})
|
||||
// If no date is <= entry.date, or the found date does not equal entry.date.
|
||||
if index == len(datesInRange) || !datesInRange[index].Equal(entry.date) {
|
||||
continue
|
||||
}
|
||||
appendEntry(filteredEntries, environment, test, entry)
|
||||
}
|
||||
}
|
||||
}
|
||||
return filteredEntries
|
||||
}
|
||||
|
||||
// Computes the flake rates over each entry in `splitEntries`.
|
||||
func computeFlakeRates(splitEntries splitEntryMap) map[string]map[string]float32 {
|
||||
flakeRates := make(map[string]map[string]float32)
|
||||
for environment, environmentSplit := range splitEntries {
|
||||
for test, testSplit := range environmentSplit {
|
||||
failures := 0
|
||||
for _, entry := range testSplit {
|
||||
if entry.status == "Failed" {
|
||||
failures++
|
||||
}
|
||||
}
|
||||
setValue(flakeRates, environment, test, float32(failures)/float32(len(testSplit)))
|
||||
}
|
||||
}
|
||||
return flakeRates
|
||||
}
|
||||
|
||||
// Computes the average durations over each entry in `splitEntries`.
|
||||
func computeAverageDurations(splitEntries splitEntryMap) map[string]map[string]float32 {
|
||||
averageDurations := make(map[string]map[string]float32)
|
||||
for environment, environmentSplit := range splitEntries {
|
||||
for test, testSplit := range environmentSplit {
|
||||
durationSum := float32(0)
|
||||
for _, entry := range testSplit {
|
||||
durationSum += entry.duration
|
||||
}
|
||||
if len(testSplit) != 0 {
|
||||
durationSum /= float32(len(testSplit))
|
||||
}
|
||||
setValue(averageDurations, environment, test, durationSum)
|
||||
}
|
||||
}
|
||||
return averageDurations
|
||||
}
|
||||
|
||||
// Sets the `value` of keys `environment` and `test` in `mapEntries`.
|
||||
func setValue(mapEntries map[string]map[string]float32, environment, test string, value float32) {
|
||||
// Lookup the environment.
|
||||
environmentRates, ok := mapEntries[environment]
|
||||
if !ok {
|
||||
// If the environment map is missing, make a map for this environment and store it.
|
||||
environmentRates = make(map[string]float32)
|
||||
mapEntries[environment] = environmentRates
|
||||
}
|
||||
environmentRates[test] = value
|
||||
}
|
||||
|
||||
// exit will exit and clean up minikube
|
||||
func exit(msg string, err error) {
|
||||
fmt.Printf("WithError(%s)=%v called from:\n%s", msg, err, debug.Stack())
|
||||
os.Exit(60)
|
||||
}
|
|
@ -0,0 +1,492 @@
|
|||
/*
|
||||
Copyright 2021 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func simpleDate(year int, day int) time.Time {
|
||||
return time.Date(year, time.January, day, 0, 0, 0, 0, time.UTC)
|
||||
}
|
||||
|
||||
func compareEntrySlices(t *testing.T, actualData, expectedData []testEntry, extra string) {
|
||||
if extra != "" {
|
||||
extra = fmt.Sprintf(" (%s)", extra)
|
||||
}
|
||||
for i, actual := range actualData {
|
||||
if len(expectedData) <= i {
|
||||
t.Errorf("Received unmatched actual element at index %d%s. Actual: %v", i, extra, actual)
|
||||
continue
|
||||
}
|
||||
expected := expectedData[i]
|
||||
if actual != expected {
|
||||
t.Errorf("Elements differ at index %d%s. Expected: %v, Actual: %v", i, extra, expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
if len(actualData) < len(expectedData) {
|
||||
for i := len(actualData); i < len(expectedData); i++ {
|
||||
t.Errorf("Missing unmatched expected element at index %d%s. Expected: %v", i, extra, expectedData[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadData(t *testing.T) {
|
||||
actualData := readData(strings.NewReader(
|
||||
`A,B,C,D,E,F
|
||||
hash,2000-01-01,env1,test1,Passed,1
|
||||
hash,2001-01-01,env2,test2,Failed,0.5
|
||||
hash,,,test1,,0.6
|
||||
hash,2002-01-01,,,Passed,0.9
|
||||
hash,2003-01-01,env3,test3,Passed,2`,
|
||||
))
|
||||
expectedData := []testEntry{
|
||||
{
|
||||
name: "test1",
|
||||
environment: "env1",
|
||||
date: simpleDate(2000, 1),
|
||||
status: "Passed",
|
||||
duration: 1,
|
||||
},
|
||||
{
|
||||
name: "test2",
|
||||
environment: "env2",
|
||||
date: simpleDate(2001, 1),
|
||||
status: "Failed",
|
||||
duration: 0.5,
|
||||
},
|
||||
{
|
||||
name: "test1",
|
||||
environment: "env2",
|
||||
date: simpleDate(2001, 1),
|
||||
status: "Failed",
|
||||
duration: 0.6,
|
||||
},
|
||||
{
|
||||
name: "test1",
|
||||
environment: "env2",
|
||||
date: simpleDate(2002, 1),
|
||||
status: "Passed",
|
||||
duration: 0.9,
|
||||
},
|
||||
{
|
||||
name: "test3",
|
||||
environment: "env3",
|
||||
date: simpleDate(2003, 1),
|
||||
status: "Passed",
|
||||
duration: 2,
|
||||
},
|
||||
}
|
||||
|
||||
compareEntrySlices(t, actualData, expectedData, "")
|
||||
}
|
||||
|
||||
func compareSplitData(t *testing.T, actual, expected splitEntryMap) {
|
||||
for environment, actualTests := range actual {
|
||||
expectedTests, environmentOk := expected[environment]
|
||||
if !environmentOk {
|
||||
t.Errorf("Unexpected environment %s in actual", environment)
|
||||
continue
|
||||
}
|
||||
|
||||
for test, actualEntries := range actualTests {
|
||||
expectedEntries, testOk := expectedTests[test]
|
||||
if !testOk {
|
||||
t.Errorf("Unexpected test %s (in environment %s) in actual", test, environment)
|
||||
continue
|
||||
}
|
||||
|
||||
compareEntrySlices(t, actualEntries, expectedEntries, fmt.Sprintf("environment %s, test %s", environment, test))
|
||||
}
|
||||
|
||||
for test := range expectedTests {
|
||||
_, testOk := actualTests[test]
|
||||
if !testOk {
|
||||
t.Errorf("Missing expected test %s (in environment %s) in actual", test, environment)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for environment := range expected {
|
||||
_, environmentOk := actual[environment]
|
||||
if !environmentOk {
|
||||
t.Errorf("Missing expected environment %s in actual", environment)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSplitData(t *testing.T) {
|
||||
entryE1T1_1, entryE1T1_2 := testEntry{
|
||||
name: "test1",
|
||||
environment: "env1",
|
||||
date: simpleDate(2000, 1),
|
||||
status: "Passed",
|
||||
}, testEntry{
|
||||
name: "test1",
|
||||
environment: "env1",
|
||||
date: simpleDate(2000, 2),
|
||||
status: "Passed",
|
||||
}
|
||||
entryE1T2 := testEntry{
|
||||
name: "test2",
|
||||
environment: "env1",
|
||||
date: simpleDate(2000, 1),
|
||||
status: "Passed",
|
||||
}
|
||||
entryE2T1 := testEntry{
|
||||
name: "test1",
|
||||
environment: "env2",
|
||||
date: simpleDate(2000, 1),
|
||||
status: "Passed",
|
||||
}
|
||||
entryE2T2 := testEntry{
|
||||
name: "test2",
|
||||
environment: "env2",
|
||||
date: simpleDate(2000, 1),
|
||||
status: "Passed",
|
||||
}
|
||||
actual := splitData([]testEntry{entryE1T1_1, entryE1T1_2, entryE1T2, entryE2T1, entryE2T2})
|
||||
expected := splitEntryMap{
|
||||
"env1": {
|
||||
"test1": {entryE1T1_1, entryE1T1_2},
|
||||
"test2": {entryE1T2},
|
||||
},
|
||||
"env2": {
|
||||
"test1": {entryE2T1},
|
||||
"test2": {entryE2T2},
|
||||
},
|
||||
}
|
||||
|
||||
compareSplitData(t, actual, expected)
|
||||
}
|
||||
|
||||
func TestFilterRecentEntries(t *testing.T) {
|
||||
entryE1T1R1, entryE1T1R2, entryE1T1R3, entryE1T1O1, entryE1T1O2 := testEntry{
|
||||
name: "test1",
|
||||
environment: "env1",
|
||||
date: simpleDate(2000, 4),
|
||||
status: "Passed",
|
||||
}, testEntry{
|
||||
name: "test1",
|
||||
environment: "env1",
|
||||
date: simpleDate(2000, 3),
|
||||
status: "Passed",
|
||||
}, testEntry{
|
||||
name: "test1",
|
||||
environment: "env1",
|
||||
date: simpleDate(2000, 3),
|
||||
status: "Passed",
|
||||
}, testEntry{
|
||||
name: "test1",
|
||||
environment: "env1",
|
||||
date: simpleDate(2000, 2),
|
||||
status: "Passed",
|
||||
}, testEntry{
|
||||
name: "test1",
|
||||
environment: "env1",
|
||||
date: simpleDate(2000, 1),
|
||||
status: "Passed",
|
||||
}
|
||||
entryE1T2R1, entryE1T2R2, entryE1T2O1 := testEntry{
|
||||
name: "test2",
|
||||
environment: "env1",
|
||||
date: simpleDate(2001, 3),
|
||||
status: "Passed",
|
||||
}, testEntry{
|
||||
name: "test2",
|
||||
environment: "env1",
|
||||
date: simpleDate(2001, 2),
|
||||
status: "Passed",
|
||||
}, testEntry{
|
||||
name: "test2",
|
||||
environment: "env1",
|
||||
date: simpleDate(2001, 1),
|
||||
status: "Passed",
|
||||
}
|
||||
entryE2T2R1, entryE2T2R2, entryE2T2O1 := testEntry{
|
||||
name: "test2",
|
||||
environment: "env2",
|
||||
date: simpleDate(2003, 3),
|
||||
status: "Passed",
|
||||
}, testEntry{
|
||||
name: "test2",
|
||||
environment: "env2",
|
||||
date: simpleDate(2003, 2),
|
||||
status: "Passed",
|
||||
}, testEntry{
|
||||
name: "test2",
|
||||
environment: "env2",
|
||||
date: simpleDate(2003, 1),
|
||||
status: "Passed",
|
||||
}
|
||||
|
||||
actualData := filterRecentEntries(splitEntryMap{
|
||||
"env1": {
|
||||
"test1": {
|
||||
entryE1T1R1,
|
||||
entryE1T1R2,
|
||||
entryE1T1R3,
|
||||
entryE1T1O1,
|
||||
entryE1T1O2,
|
||||
},
|
||||
"test2": {
|
||||
entryE1T2R1,
|
||||
entryE1T2R2,
|
||||
entryE1T2O1,
|
||||
},
|
||||
},
|
||||
"env2": {
|
||||
"test2": {
|
||||
entryE2T2R1,
|
||||
entryE2T2R2,
|
||||
entryE2T2O1,
|
||||
},
|
||||
},
|
||||
}, 2)
|
||||
|
||||
expectedData := splitEntryMap{
|
||||
"env1": {
|
||||
"test1": {
|
||||
entryE1T1R1,
|
||||
entryE1T1R2,
|
||||
entryE1T1R3,
|
||||
},
|
||||
"test2": {
|
||||
entryE1T2R1,
|
||||
entryE1T2R2,
|
||||
},
|
||||
},
|
||||
"env2": {
|
||||
"test2": {
|
||||
entryE2T2R1,
|
||||
entryE2T2R2,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
compareSplitData(t, actualData, expectedData)
|
||||
}
|
||||
|
||||
func compareValues(t *testing.T, actualValues, expectedValues map[string]map[string]float32) {
|
||||
for environment, actualTests := range actualValues {
|
||||
expectedTests, environmentOk := expectedValues[environment]
|
||||
if !environmentOk {
|
||||
t.Errorf("Unexpected environment %s in actual", environment)
|
||||
continue
|
||||
}
|
||||
|
||||
for test, actualValue := range actualTests {
|
||||
expectedValue, testOk := expectedTests[test]
|
||||
if !testOk {
|
||||
t.Errorf("Unexpected test %s (in environment %s) in actual", test, environment)
|
||||
continue
|
||||
}
|
||||
|
||||
if actualValue != expectedValue {
|
||||
t.Errorf("Wrong value at environment %s and test %s. Expected: %v, Actual: %v", environment, test, expectedValue, actualValue)
|
||||
}
|
||||
}
|
||||
|
||||
for test := range expectedTests {
|
||||
_, testOk := actualTests[test]
|
||||
if !testOk {
|
||||
t.Errorf("Missing expected test %s (in environment %s) in actual", test, environment)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for environment := range expectedValues {
|
||||
_, environmentOk := actualValues[environment]
|
||||
if !environmentOk {
|
||||
t.Errorf("Missing expected environment %s in actual", environment)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestComputeFlakeRates(t *testing.T) {
|
||||
actualData := computeFlakeRates(splitEntryMap{
|
||||
"env1": {
|
||||
"test1": {
|
||||
{
|
||||
name: "test1",
|
||||
environment: "env1",
|
||||
date: simpleDate(2000, 4),
|
||||
status: "Passed",
|
||||
}, {
|
||||
name: "test1",
|
||||
environment: "env1",
|
||||
date: simpleDate(2000, 3),
|
||||
status: "Passed",
|
||||
}, {
|
||||
name: "test1",
|
||||
environment: "env1",
|
||||
date: simpleDate(2000, 3),
|
||||
status: "Passed",
|
||||
}, {
|
||||
name: "test1",
|
||||
environment: "env1",
|
||||
date: simpleDate(2000, 2),
|
||||
status: "Passed",
|
||||
}, {
|
||||
name: "test1",
|
||||
environment: "env1",
|
||||
date: simpleDate(2000, 1),
|
||||
status: "Failed",
|
||||
},
|
||||
},
|
||||
"test2": {
|
||||
{
|
||||
name: "test2",
|
||||
environment: "env1",
|
||||
date: simpleDate(2001, 3),
|
||||
status: "Failed",
|
||||
}, {
|
||||
name: "test2",
|
||||
environment: "env1",
|
||||
date: simpleDate(2001, 2),
|
||||
status: "Failed",
|
||||
}, {
|
||||
name: "test2",
|
||||
environment: "env1",
|
||||
date: simpleDate(2001, 1),
|
||||
status: "Failed",
|
||||
},
|
||||
},
|
||||
},
|
||||
"env2": {
|
||||
"test2": {
|
||||
{
|
||||
name: "test2",
|
||||
environment: "env2",
|
||||
date: simpleDate(2003, 3),
|
||||
status: "Passed",
|
||||
}, testEntry{
|
||||
name: "test2",
|
||||
environment: "env2",
|
||||
date: simpleDate(2003, 2),
|
||||
status: "Failed",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
expectedData := map[string]map[string]float32{
|
||||
"env1": {
|
||||
"test1": 0.2,
|
||||
"test2": 1,
|
||||
},
|
||||
"env2": {
|
||||
"test2": 0.5,
|
||||
},
|
||||
}
|
||||
|
||||
compareValues(t, actualData, expectedData)
|
||||
}
|
||||
|
||||
func TestComputeAverageDurations(t *testing.T) {
|
||||
actualData := computeAverageDurations(splitEntryMap{
|
||||
"env1": {
|
||||
"test1": {
|
||||
{
|
||||
name: "test1",
|
||||
environment: "env1",
|
||||
date: simpleDate(2000, 4),
|
||||
status: "Passed",
|
||||
duration: 1,
|
||||
}, {
|
||||
name: "test1",
|
||||
environment: "env1",
|
||||
date: simpleDate(2000, 3),
|
||||
status: "Passed",
|
||||
duration: 2,
|
||||
}, {
|
||||
name: "test1",
|
||||
environment: "env1",
|
||||
date: simpleDate(2000, 3),
|
||||
status: "Passed",
|
||||
duration: 3,
|
||||
}, {
|
||||
name: "test1",
|
||||
environment: "env1",
|
||||
date: simpleDate(2000, 2),
|
||||
status: "Passed",
|
||||
duration: 3,
|
||||
}, {
|
||||
name: "test1",
|
||||
environment: "env1",
|
||||
date: simpleDate(2000, 1),
|
||||
status: "Failed",
|
||||
duration: 3,
|
||||
},
|
||||
},
|
||||
"test2": {
|
||||
{
|
||||
name: "test2",
|
||||
environment: "env1",
|
||||
date: simpleDate(2001, 3),
|
||||
status: "Failed",
|
||||
duration: 1,
|
||||
}, {
|
||||
name: "test2",
|
||||
environment: "env1",
|
||||
date: simpleDate(2001, 2),
|
||||
status: "Failed",
|
||||
duration: 3,
|
||||
}, {
|
||||
name: "test2",
|
||||
environment: "env1",
|
||||
date: simpleDate(2001, 1),
|
||||
status: "Failed",
|
||||
duration: 3,
|
||||
},
|
||||
},
|
||||
},
|
||||
"env2": {
|
||||
"test2": {
|
||||
{
|
||||
name: "test2",
|
||||
environment: "env2",
|
||||
date: simpleDate(2003, 3),
|
||||
status: "Passed",
|
||||
duration: 0.5,
|
||||
}, testEntry{
|
||||
name: "test2",
|
||||
environment: "env2",
|
||||
date: simpleDate(2003, 2),
|
||||
status: "Failed",
|
||||
duration: 1.5,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
expectedData := map[string]map[string]float32{
|
||||
"env1": {
|
||||
"test1": float32(12) / float32(5),
|
||||
"test2": float32(7) / float32(3),
|
||||
},
|
||||
"env2": {
|
||||
"test2": 1,
|
||||
},
|
||||
}
|
||||
|
||||
compareValues(t, actualData, expectedData)
|
||||
}
|
|
@ -0,0 +1,21 @@
|
|||
<html>
|
||||
<head>
|
||||
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
|
||||
<style>
|
||||
table {
|
||||
border: 1px solid gray;
|
||||
margin-left: auto;
|
||||
margin-right: auto;
|
||||
border-collapse: collapse;
|
||||
}
|
||||
td, th {
|
||||
border-bottom: 1px solid gray;
|
||||
padding: 8px;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div id="chart_div"></div>
|
||||
</body>
|
||||
<script src="flake_chart.js"></script>
|
||||
</html>
|
|
@ -0,0 +1,530 @@
|
|||
|
||||
// Displays an error message to the UI. Any previous message will be erased.
|
||||
function displayError(message) {
|
||||
// Clear the body of all children.
|
||||
while (document.body.firstChild) {
|
||||
document.body.removeChild(document.body.firstChild);
|
||||
}
|
||||
const element = document.createElement("p");
|
||||
element.innerText = "Error: " + message;
|
||||
element.style.color = "red";
|
||||
element.style.fontFamily = "Arial";
|
||||
element.style.fontWeight = "bold";
|
||||
element.style.margin = "5rem";
|
||||
document.body.appendChild(element);
|
||||
}
|
||||
|
||||
// Creates a generator that reads the response body one line at a time.
//
// `response` is a fetch() Response whose body is streamed chunk-by-chunk;
// `updateProgress` is invoked with the byte length of each chunk received,
// letting the caller drive a progress bar. Yields each non-empty line of the
// body, with line text that straddles chunk boundaries carried over in
// `pendingText` until its terminator arrives.
async function* bodyByLinesIterator(response, updateProgress) {
  const utf8Decoder = new TextDecoder('utf-8');
  const reader = response.body.getReader();

  // Line terminators. Note the alternation order means "\r\n" is consumed as
  // two separate matches ("\r" then "\n"); the empty line produced in between
  // is dropped by the `line !== ""` check below, so CRLF input still works.
  const re = /\n|\r|\r\n/gm;
  let pendingText = "";

  let readerDone = false;
  while (!readerDone) {
    // Read a chunk.
    const { value: chunk, done } = await reader.read();
    readerDone = done;
    if (!chunk) {
      continue;
    }
    // Notify the listener of progress.
    updateProgress(chunk.length);
    const decodedChunk = utf8Decoder.decode(chunk);

    let startIndex = 0;
    let result;
    // Keep processing until there are no more new lines.
    // (When exec() returns null it resets re.lastIndex to 0, so the sticky
    // index never leaks into the next chunk.)
    while ((result = re.exec(decodedChunk)) !== null) {
      const text = decodedChunk.substring(startIndex, result.index);
      startIndex = re.lastIndex;

      const line = pendingText + text;
      pendingText = "";
      if (line !== "") {
        yield line;
      }
    }
    // Any text after the last new line is appended to any pending text.
    pendingText += decodedChunk.substring(startIndex);
  }

  // If there is any text remaining, return it.
  if (pendingText !== "") {
    yield pendingText;
  }
}
|
||||
|
||||
// Determines whether `str` matches at least one value in `enumObject`.
function isValidEnumValue(enumObject, str) {
  return Object.values(enumObject).includes(str);
}
|
||||
|
||||
// Enum for test status. Values match the Status column of the CSV data
// produced by process_data.sh. Frozen so chart code cannot mutate it by
// accident; the lookup behavior for callers is unchanged.
const testStatus = Object.freeze({
  PASSED: "Passed",
  FAILED: "Failed",
  SKIPPED: "Skipped",
});
|
||||
|
||||
// Fetches "data.csv" and parses it into an array of test-run records of the
// shape {commit, date: Date, environment, name, status, duration: Number}.
// A progress bar is shown while downloading and removed when parsing ends.
// Empty CSV fields inherit the value of the same field from the previous row
// (the CSV is compressed that way — see the `value === ""` backfill below).
// Throws a string message on fetch failure, a malformed header, or empty data.
async function loadTestData() {
  const response = await fetch("data.csv");
  if (!response.ok) {
    const responseText = await response.text();
    throw `Failed to fetch data from GCS bucket. Error: ${responseText}`;
  }

  // Build the "Downloading data..." progress UI: an outer full-width box with
  // an inner margin box holding a prompt and a <progress> element.
  const box = document.createElement("div");
  box.style.width = "100%";
  const innerBox = document.createElement("div");
  innerBox.style.margin = "5rem";
  box.appendChild(innerBox);
  const progressBarPrompt = document.createElement("h1");
  progressBarPrompt.style.fontFamily = "Arial";
  progressBarPrompt.style.textAlign = "center";
  progressBarPrompt.innerText = "Downloading data...";
  innerBox.appendChild(progressBarPrompt);
  const progressBar = document.createElement("progress");
  // Max is the full download size; NaN (missing header) leaves the bar indeterminate.
  progressBar.setAttribute("max", Number(response.headers.get('Content-Length')));
  progressBar.style.width = "100%";
  innerBox.appendChild(progressBar);
  document.body.appendChild(box);

  // Advance the progress bar as each chunk arrives.
  let readBytes = 0;
  const lines = bodyByLinesIterator(response, value => {
    readBytes += value;
    progressBar.setAttribute("value", readBytes);
  });
  // Consume the header to ensure the data has the right number of fields.
  const header = (await lines.next()).value;
  if (header.split(",").length != 6) {
    document.body.removeChild(box);
    throw `Fetched CSV data contains wrong number of fields. Expected: 6. Actual Header: "${header}"`;
  }

  const testData = [];
  // Previous row's fields; empty fields in the current row fall back to these.
  let lineData = ["", "", "", "", "", ""];
  for await (const line of lines) {
    let splitLine = line.split(",");
    if (splitLine.length != 6) {
      console.warn(`Found line with wrong number of fields. Actual: ${splitLine.length} Expected: 6. Line: "${line}"`);
      continue;
    }
    // Decompress: an empty field repeats the previous row's value.
    splitLine = splitLine.map((value, index) => value === "" ? lineData[index] : value);
    lineData = splitLine;
    if (!isValidEnumValue(testStatus, splitLine[4])) {
      console.warn(`Invalid test status provided. Actual: ${splitLine[4]} Expected: One of ${Object.values(testStatus).join(", ")}`);
      continue;
    }
    testData.push({
      commit: splitLine[0],
      date: new Date(splitLine[1]),
      environment: splitLine[2],
      name: splitLine[3],
      status: splitLine[4],
      duration: Number(splitLine[5]),
    });
  }
  // Tear down the progress UI whether or not any rows parsed.
  document.body.removeChild(box);
  if (testData.length == 0) {
    throw "Fetched CSV data is empty or poorly formatted.";
  }
  return testData;
}
|
||||
|
||||
// Computes the sum of an array of numbers (0 for an empty array).
Array.prototype.sum = function () {
  let total = 0;
  for (const value of this) {
    total += value;
  }
  return total;
};
|
||||
|
||||
// Computes the average of an array of numbers; an empty array averages to 0.
Array.prototype.average = function () {
  if (this.length === 0) {
    return 0;
  }
  const total = this.reduce((acc, value) => acc + value, 0);
  return total / this.length;
};
|
||||
|
||||
// Groups array elements by keys obtained through `keyGetter`, returning an
// array of groups in first-seen key order (each group preserves element order).
Array.prototype.groupBy = function (keyGetter) {
  const groups = new Map();
  for (const element of this) {
    const key = keyGetter(element);
    const bucket = groups.get(key);
    if (bucket === undefined) {
      groups.set(key, [element]);
    } else {
      bucket.push(element);
    }
  }
  return Array.from(groups.values());
};
|
||||
|
||||
// Parse URL search `query` (e.g. "?env=Docker_Linux&test=TestFoo") into a
// plain object mapping decoded keys to decoded values.
//
// Uses URLSearchParams instead of hand-splitting with the deprecated
// `unescape`: it tolerates a leading "?", decodes %-escapes as UTF-8
// (unescape mangles multi-byte sequences), treats "+" as a space, and keeps
// everything after the first "=" in the value instead of truncating it.
function parseUrlQuery(query) {
  return Object.fromEntries(new URLSearchParams(query));
}
|
||||
|
||||
// Takes a set of test runs (all of the same test), and aggregates them into one element per date.
//
// Each returned element has:
//   date:         the shared run date of the group.
//   flakeRate:    average failure percentage (a FAILED run counts as 100, anything else 0).
//   duration:     average duration of the runs on that date.
//   commitHashes: per-commit rollups {hash, failures, runs, duration} used for tooltips.
//
// Relies on the Array.prototype.groupBy/average/sum helpers defined in this file.
function aggregateRuns(testRuns) {
  return testRuns
    // Group runs by the date it ran.
    .groupBy(run => run.date.getTime())
    // Sort by run date, past to future.
    .sort((a, b) => a[0].date - b[0].date)
    // Map each group to all variables need to format the rows.
    .map(tests => ({
      date: tests[0].date, // Get one of the dates from the tests (which will all be the same).
      flakeRate: tests.map(test => test.status === testStatus.FAILED ? 100 : 0).average(), // Compute average of runs where FAILED counts as 100%.
      duration: tests.map(test => test.duration).average(), // Compute average duration of runs.
      commitHashes: tests.map(test => ({ // Take all hashes, statuses, and durations of tests in this group.
        hash: test.commit,
        status: test.status,
        duration: test.duration
      })).groupBy(run => run.hash).map(runsWithSameHash => ({
        hash: runsWithSameHash[0].hash,
        failures: runsWithSameHash.map(run => run.status === testStatus.FAILED ? 1 : 0).sum(),
        runs: runsWithSameHash.length,
        duration: runsWithSameHash.map(run => run.duration).average(),
      }))
    }));
}
|
||||
|
||||
// Builds the link to the gopogh report page for commit `hash` run on `environment`.
const hashToLink = (hash, environment) => {
  const shortHash = hash.slice(0, 7);
  return `https://storage.googleapis.com/minikube-builds/logs/master/${shortHash}/${environment}.html`;
};
|
||||
|
||||
// Draws a dual-axis line chart into #chart_div for a single test on a single
// environment: flake percentage per day (left axis, red) and average duration
// per day (right axis, blue), with HTML tooltips listing per-commit details.
// Requires google.visualization (corechart) to be loaded before calling.
function displayTestAndEnvironmentChart(testData, testName, environmentName) {
  const data = new google.visualization.DataTable();
  data.addColumn('date', 'Date');
  data.addColumn('number', 'Flake Percentage');
  // Tooltip columns are HTML strings paired with the preceding data column.
  data.addColumn({ type: 'string', role: 'tooltip', 'p': { 'html': true } });
  data.addColumn('number', 'Duration');
  data.addColumn({ type: 'string', role: 'tooltip', 'p': { 'html': true } });

  const testRuns = testData
    // Filter to only contain unskipped runs of the requested test and requested environment.
    .filter(test => test.name === testName && test.environment === environmentName && test.status !== testStatus.SKIPPED);

  // One row per day: [date, flakeRate, flakeTooltip, duration, durationTooltip].
  data.addRows(
    aggregateRuns(testRuns)
      .map(groupData => [
        groupData.date,
        groupData.flakeRate,
        `<div style="padding: 1rem; font-family: 'Arial'; font-size: 14">
          <b>${groupData.date.toString()}</b><br>
          <b>Flake Percentage:</b> ${groupData.flakeRate.toFixed(2)}%<br>
          <b>Hashes:</b><br>
          ${groupData.commitHashes.map(({ hash, failures, runs }) => ` - <a href="${hashToLink(hash, environmentName)}">${hash}</a> (Failures: ${failures}/${runs})`).join("<br>")}
        </div>`,
        groupData.duration,
        `<div style="padding: 1rem; font-family: 'Arial'; font-size: 14">
          <b>${groupData.date.toString()}</b><br>
          <b>Average Duration:</b> ${groupData.duration.toFixed(2)}s<br>
          <b>Hashes:</b><br>
          ${groupData.commitHashes.map(({ hash, runs, duration }) => ` - <a href="${hashToLink(hash, environmentName)}">${hash}</a> (Average of ${runs}: ${duration.toFixed(2)}s)`).join("<br>")}
        </div>`,
      ])
  );

  const options = {
    title: `Flake rate and duration by day of ${testName} on ${environmentName}`,
    width: window.innerWidth,
    height: window.innerHeight,
    pointSize: 10,
    pointShape: "circle",
    // Series 0 (flake rate) on the left axis, series 1 (duration) on the right.
    series: {
      0: { targetAxisIndex: 0 },
      1: { targetAxisIndex: 1 },
    },
    vAxes: {
      0: { title: "Flake rate", minValue: 0, maxValue: 100 },
      1: { title: "Duration (seconds)" },
    },
    colors: ['#dc3912', '#3366cc'],
    // Tooltips render only when a point is selected, and as HTML (not SVG text).
    tooltip: { trigger: "selection", isHtml: true }
  };
  const chart = new google.visualization.LineChart(document.getElementById('chart_div'));
  chart.draw(data, options);
}
|
||||
|
||||
// Builds and returns a <table> ranking tests by recent flake percentage.
// `recentFlakePercentage` is an array of {testName, flakeRate} sorted by the
// caller; `previousFlakePercentageMap` maps test name -> flake rate of the
// prior period and is used to compute the "Growth" column (0 when a test has
// no prior data). Each test name links back to this page with ?env&test set.
function createRecentFlakePercentageTable(recentFlakePercentage, previousFlakePercentageMap, environmentName) {
  // Helper: make a <td>/<th> whose innerHTML is `text` (text may contain markup).
  const createCell = (elementType, text) => {
    const element = document.createElement(elementType);
    element.innerHTML = text;
    return element;
  }

  const table = document.createElement("table");
  const tableHeaderRow = document.createElement("tr");
  tableHeaderRow.appendChild(createCell("th", "Rank"));
  tableHeaderRow.appendChild(createCell("th", "Test Name")).style.textAlign = "left";
  tableHeaderRow.appendChild(createCell("th", "Recent Flake Percentage"));
  tableHeaderRow.appendChild(createCell("th", "Growth (since last 15 days)"));
  table.appendChild(tableHeaderRow);
  for (let i = 0; i < recentFlakePercentage.length; i++) {
    const {testName, flakeRate} = recentFlakePercentage[i];
    const row = document.createElement("tr");
    // Rank is 1-based display order of the (pre-sorted) input.
    row.appendChild(createCell("td", "" + (i + 1))).style.textAlign = "center";
    row.appendChild(createCell("td", `<a href="${window.location.pathname}?env=${environmentName}&test=${testName}">${testName}</a>`));
    row.appendChild(createCell("td", `${flakeRate.toFixed(2)}%`)).style.textAlign = "right";
    const growth = previousFlakePercentageMap.has(testName) ?
      flakeRate - previousFlakePercentageMap.get(testName) : 0;
    // Growth is colored red when flakiness increased, green when it decreased.
    row.appendChild(createCell("td", `<span style="color: ${growth === 0 ? "black" : (growth > 0 ? "red" : "green")}">${growth > 0 ? '+' + growth.toFixed(2) : growth.toFixed(2)}%</span>`));
    table.appendChild(row);
  }
  return table;
}
|
||||
|
||||
// Renders the environment-level dashboard. Draws three full-window charts
// into #chart_div for `environmentName` — (1) daily flake rate of the top 10
// flakiest recent tests, (2) their daily average duration, (3) daily averaged
// test/fail counts — then appends a flake-percentage ranking table to the
// body. Requires google.visualization plus this file's Array helpers,
// aggregateRuns, hashToLink and createRecentFlakePercentageTable.
function displayEnvironmentChart(testData, environmentName) {
  // Number of days to use to look for "flaky-est" tests.
  const dateRange = 15;
  // Number of tests to display in chart.
  const topFlakes = 10;

  testData = testData
    // Filter to only contain unskipped runs of the requested environment.
    .filter(test => test.environment === environmentName && test.status !== testStatus.SKIPPED);

  // All runs grouped by test name.
  const testRuns = testData
    .groupBy(test => test.name);

  // Map: test name -> (Map: date in ms-since-epoch -> per-day aggregate).
  const aggregatedRuns = new Map(testRuns.map(test => [
    test[0].name,
    new Map(aggregateRuns(test)
      .map(runDate => [ runDate.date.getTime(), runDate ]))]));
  // Every distinct day (ms epoch) on which any test ran.
  const uniqueDates = new Set();
  for (const [_, runDateMap] of aggregatedRuns) {
    for (const [dateTime, _] of runDateMap) {
      uniqueDates.add(dateTime);
    }
  }
  // NOTE(review): default sort() is lexicographic; it orders these numeric
  // timestamps correctly only while they all have the same digit count —
  // consider .sort((a, b) => a - b).
  const orderedDates = Array.from(uniqueDates).sort();
  // Last `dateRange` days, and the `dateRange` days immediately before those.
  const recentDates = orderedDates.slice(-dateRange);
  const previousDates = orderedDates.slice(-2 * dateRange, -dateRange);

  // For each test, compute a run-weighted flake percentage over `dates`.
  // NOTE(review): per-day run count is approximated by commitHashes.length
  // (number of distinct commits that day) — confirm that is the intended weight.
  const computeFlakePercentage = (runs, dates) => Array.from(runs).map(([testName, data]) => {
    const {flakeCount, totalCount} = dates.map(date => {
      const dateInfo = data.get(date);
      return dateInfo === undefined ? null : {
        flakeRate: dateInfo.flakeRate,
        runs: dateInfo.commitHashes.length
      };
    }).filter(dateInfo => dateInfo != null)
      .reduce(({flakeCount, totalCount}, {flakeRate, runs}) => ({
        flakeCount: flakeRate * runs + flakeCount,
        totalCount: runs + totalCount
      }), {flakeCount: 0, totalCount: 0});
    return {
      testName,
      flakeRate: totalCount === 0 ? 0 : flakeCount / totalCount,
    };
  });

  // Tests ranked by recent flakiness, most flaky first.
  const recentFlakePercentage = computeFlakePercentage(aggregatedRuns, recentDates)
    .sort((a, b) => b.flakeRate - a.flakeRate);
  // Prior-period flake rate per test, for the "growth" column of the table.
  const previousFlakePercentageMap = new Map(
    computeFlakePercentage(aggregatedRuns, previousDates)
      .map(({testName, flakeRate}) => [testName, flakeRate]));

  // Names of the `topFlakes` flakiest recent tests; these get chart series.
  const recentTopFlakes = recentFlakePercentage
    .slice(0, topFlakes)
    .map(({testName}) => testName);

  const chartsContainer = document.getElementById('chart_div');
  // Chart 1: daily flake percentage of the top flaky tests.
  {
    const data = new google.visualization.DataTable();
    data.addColumn('date', 'Date');
    // One value column + one HTML-tooltip column per test.
    for (const name of recentTopFlakes) {
      data.addColumn('number', `Flake Percentage - ${name}`);
      data.addColumn({ type: 'string', role: 'tooltip', 'p': { 'html': true } });
    }
    data.addRows(
      // One row per day; a test with no runs that day contributes [null, null].
      orderedDates.map(dateTime => [new Date(dateTime)].concat(recentTopFlakes.map(name => {
        // Shadows the outer DataTable `data` — this is the per-day aggregate.
        const data = aggregatedRuns.get(name).get(dateTime);
        return data !== undefined ? [
          data.flakeRate,
          `<div style="padding: 1rem; font-family: 'Arial'; font-size: 14">
            <b style="display: block">${name}</b><br>
            <b>${data.date.toString()}</b><br>
            <b>Flake Percentage:</b> ${data.flakeRate.toFixed(2)}%<br>
            <b>Hashes:</b><br>
            ${data.commitHashes.map(({ hash, failures, runs }) => ` - <a href="${hashToLink(hash, environmentName)}">${hash}</a> (Failures: ${failures}/${runs})`).join("<br>")}
          </div>`
        ] : [null, null];
      })).flat())
    );
    const options = {
      title: `Flake rate by day of top ${topFlakes} of recent test flakiness (past ${dateRange} days) on ${environmentName}`,
      width: window.innerWidth,
      height: window.innerHeight,
      pointSize: 10,
      pointShape: "circle",
      vAxes: {
        0: { title: "Flake rate", minValue: 0, maxValue: 100 },
      },
      tooltip: { trigger: "selection", isHtml: true }
    };
    // Full-viewport container so each chart occupies one "screen".
    const flakeRateContainer = document.createElement("div");
    flakeRateContainer.style.width = "100vw";
    flakeRateContainer.style.height = "100vh";
    chartsContainer.appendChild(flakeRateContainer);
    const chart = new google.visualization.LineChart(flakeRateContainer);
    chart.draw(data, options);
  }
  // Chart 2: daily average duration of the same top flaky tests.
  {
    const data = new google.visualization.DataTable();
    data.addColumn('date', 'Date');
    for (const name of recentTopFlakes) {
      data.addColumn('number', `Duration - ${name}`);
      data.addColumn({ type: 'string', role: 'tooltip', 'p': { 'html': true } });
    }
    data.addRows(
      orderedDates.map(dateTime => [new Date(dateTime)].concat(recentTopFlakes.map(name => {
        const data = aggregatedRuns.get(name).get(dateTime);
        return data !== undefined ? [
          data.duration,
          `<div style="padding: 1rem; font-family: 'Arial'; font-size: 14">
            <b style="display: block">${name}</b><br>
            <b>${data.date.toString()}</b><br>
            <b>Average Duration:</b> ${data.duration.toFixed(2)}s<br>
            <b>Hashes:</b><br>
            ${data.commitHashes.map(({ hash, duration, runs }) => ` - <a href="${hashToLink(hash, environmentName)}">${hash}</a> (Average Duration: ${duration.toFixed(2)}s [${runs} runs])`).join("<br>")}
          </div>`
        ] : [null, null];
      })).flat())
    );
    const options = {
      title: `Average duration by day of top ${topFlakes} of recent test flakiness (past ${dateRange} days) on ${environmentName}`,
      width: window.innerWidth,
      height: window.innerHeight,
      pointSize: 10,
      pointShape: "circle",
      vAxes: {
        0: { title: "Average Duration (s)" },
      },
      tooltip: { trigger: "selection", isHtml: true }
    };
    const durationContainer = document.createElement("div");
    durationContainer.style.width = "100vw";
    durationContainer.style.height = "100vh";
    chartsContainer.appendChild(durationContainer);
    const chart = new google.visualization.LineChart(durationContainer);
    chart.draw(data, options);
  }
  // Chart 3: averaged test count and fail count per day across all tests.
  {
    // Group test runs by their date, then by their commit, and finally by test names.
    const testCountData = testData
      // Group by date.
      .groupBy(run => run.date.getTime())
      .map(runDate => ({
        date: runDate[0].date,
        commits: runDate
          // Group by commit
          .groupBy(run => run.commit)
          .map(commitRuns => commitRuns
            // Group by test name.
            .groupBy(commitRun => commitRun.name)
            // Consolidate tests of a single name into a single object.
            .reduce((accum, commitTestRuns) => ({
              commit: commitTestRuns[0].commit,
              // The total number of times any test ran.
              sumTestCount: accum.sumTestCount + commitTestRuns.length,
              // The total number of times any test failed.
              sumFailCount: accum.sumFailCount + commitTestRuns.filter(run => run.status === testStatus.FAILED).length,
              // The most number of times any test name ran (this will be a proxy for the number of integration jobs were triggered).
              maxRunCount: Math.max(accum.maxRunCount, commitTestRuns.length),
            }), {
              sumTestCount: 0,
              sumFailCount: 0,
              maxRunCount: 0
            }))
      }))
      .map(dateInfo => ({
        ...dateInfo,
        // Use the commit data of each date to compute the average test count and average fail count for the day.
        testCount: dateInfo.commits.reduce(
          (accum, commitInfo) => accum + (commitInfo.sumTestCount / commitInfo.maxRunCount), 0) / dateInfo.commits.length,
        failCount: dateInfo.commits.reduce(
          (accum, commitInfo) => accum + (commitInfo.sumFailCount / commitInfo.maxRunCount), 0) / dateInfo.commits.length,
      }))
      .sort((a, b) => a.date - b.date);

    const data = new google.visualization.DataTable();
    data.addColumn('date', 'Date');
    data.addColumn('number', 'Test Count');
    data.addColumn({ type: 'string', role: 'tooltip', 'p': { 'html': true } });
    data.addColumn('number', 'Failed Tests');
    data.addColumn({ type: 'string', role: 'tooltip', 'p': { 'html': true } });
    data.addRows(
      testCountData.map(dateInfo => [
        dateInfo.date,
        dateInfo.testCount,
        `<div style="padding: 1rem; font-family: 'Arial'; font-size: 14">
          <b>${dateInfo.date.toString()}</b><br>
          <b>Test Count (averaged): </b> ${+dateInfo.testCount.toFixed(2)}<br>
          <b>Hashes:</b><br>
          ${dateInfo.commits.map(commit => ` - <a href="${hashToLink(commit.commit, environmentName)}">${commit.commit}</a> (Test count (averaged): ${+(commit.sumTestCount / commit.maxRunCount).toFixed(2)} [${commit.sumTestCount} tests / ${commit.maxRunCount} runs])`).join("<br>")}
        </div>`,
        dateInfo.failCount,
        `<div style="padding: 1rem; font-family: 'Arial'; font-size: 14">
          <b>${dateInfo.date.toString()}</b><br>
          <b>Fail Count (averaged): </b> ${+dateInfo.failCount.toFixed(2)}<br>
          <b>Hashes:</b><br>
          ${dateInfo.commits.map(commit => ` - <a href="${hashToLink(commit.commit, environmentName)}">${commit.commit}</a> (Fail count (averaged): ${+(commit.sumFailCount / commit.maxRunCount).toFixed(2)} [${commit.sumFailCount} fails / ${commit.maxRunCount} runs])`).join("<br>")}
        </div>`,
      ]));
    const options = {
      title: `Test count by day on ${environmentName}`,
      width: window.innerWidth,
      height: window.innerHeight,
      pointSize: 10,
      pointShape: "circle",
      vAxes: {
        0: { title: "Test Count" },
        1: { title: "Failed Tests" },
      },
      tooltip: { trigger: "selection", isHtml: true }
    };
    const testCountContainer = document.createElement("div");
    testCountContainer.style.width = "100vw";
    testCountContainer.style.height = "100vh";
    chartsContainer.appendChild(testCountContainer);
    const chart = new google.visualization.LineChart(testCountContainer);
    chart.draw(data, options);
  }

  // Finally, the ranked flake-percentage table below the charts.
  document.body.appendChild(createRecentFlakePercentageTable(recentFlakePercentage, previousFlakePercentageMap, environmentName));
}
|
||||
|
||||
// Entry point: loads Google Charts and the CSV test data in parallel, then
// dispatches on the URL query — "?env=E&test=T" draws the single-test chart,
// "?env=E" (no test) draws the whole-environment dashboard. Any load error is
// rendered via displayError and aborts.
async function init() {
  google.charts.load('current', { 'packages': ['corechart'] });
  let testData;
  try {
    // Wait for Google Charts to load, and for test data to load.
    // Only store the test data (at index 1) into `testData`.
    testData = (await Promise.all([
      new Promise(resolve => google.charts.setOnLoadCallback(resolve)),
      loadTestData()
    ]))[1];
  } catch (err) {
    displayError(err);
    return;
  }

  const query = parseUrlQuery(window.location.search);
  // A missing "env" falls back to "", which simply matches no runs.
  const desiredTest = query.test, desiredEnvironment = query.env || "";

  if (desiredTest === undefined) {
    displayEnvironmentChart(testData, desiredEnvironment);
  } else {
    displayTestAndEnvironmentChart(testData, desiredTest, desiredEnvironment);
  }
}

init();
|
|
@ -0,0 +1,31 @@
|
|||
#!/bin/bash

# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Takes a CSV file through stdin, compresses it and writes it to stdout.
# Example usage: < data.csv ./optimize_data.sh > data_optimized.csv

set -eu -o pipefail

# Take input CSV. For each field, if it is the same as the previous row, replace it with an empty string.
# This is to compress the input CSV. Example:
# Input:
# hash,2021-06-10,Docker_Linux,TestFunctional,Passed,0.5
# hash,2021-06-10,Docker_Linux_containerd,TestFunctional,Failed,0.6
#
# Output:
# hash,2021-06-10,Docker_Linux,TestFunctional,Passed,0.5
# ,,Docker_Linux_containerd,,Failed,0.6
#
# awk details: j[i] caches the previous row's value of column i. A field equal
# to its cached value is blanked; a changed field is kept and becomes the new
# cached value. Consumers reverse this by backfilling empty fields from the
# previous row (see loadTestData in flake_chart.js).
awk -F, 'BEGIN {OFS = FS} { for(i=1; i<=NF; i++) { if($i == j[i]) { $i = ""; } else { j[i] = $i; } } printf "%s\n",$0 }'
|
|
@ -0,0 +1,32 @@
|
|||
#!/bin/bash

# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Takes a series of gopogh summary jsons, and formats them into a CSV file with
# a row for each test.
# Example usage: cat gopogh_1.json gopogh_2.json gopogh_3.json | ./process_data.sh

set -eu -o pipefail

# Print header.
printf "Commit Hash,Test Date,Environment,Test,Status,Duration\n"

# Turn each test in each summary file to a CSV line containing its commit hash, date, environment, test, status, and duration.
# Example line:
# 247982745892,2021-06-10,Docker_Linux,TestFunctional,Passed,0.5
#
# jq details: .Detail.Details holds "commit:date" (split(":") yields the commit
# at [0] and the date at [1]); the `[]?` forms tolerate a summary that lacks a
# Passed/Failed/Skipped array; skipped tests get duration 0 since they did not run.
jq -r '((.PassedTests[]? as $name | {commit: (.Detail.Details | split(":") | .[0]), date: (.Detail.Details | split(":") | .[1]), environment: .Detail.Name, test: $name, duration: .Durations[$name], status: "Passed"}),
  (.FailedTests[]? as $name | {commit: (.Detail.Details | split(":") | .[0]), date: (.Detail.Details | split(":") | .[1]), environment: .Detail.Name, test: $name, duration: .Durations[$name], status: "Failed"}),
  (.SkippedTests[]? as $name | {commit: (.Detail.Details | split(":") | .[0]), date: (.Detail.Details | split(":") | .[1]), environment: .Detail.Name, test: $name, duration: 0, status: "Skipped"}))
  | .commit + "," + .date + "," + .environment + "," + .test + "," + .status + "," + (.duration | tostring)'
|
|
@ -0,0 +1,90 @@
|
|||
#!/bin/bash

# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Creates a comment on the provided PR number, using the gopogh summaries of
# the environments listed in the given file, to list out the flake rates of all
# failing tests.
# Example usage: ./report_flakes.sh 11602 a1b2c3d finished_environments.txt

set -eu -o pipefail

if [ "$#" -ne 3 ]; then
  echo "Wrong number of arguments. Usage: report_flakes.sh <PR number> <short commit> <environment list file>" 1>&2
  exit 1
fi

PR_NUMBER=$1
SHORT_COMMIT=$2
ENVIRONMENT_LIST=$3

# To prevent having a super-long comment, add a maximum number of tests to report.
MAX_REPORTED_TESTS=30

# Directory of this script, so sibling scripts resolve regardless of the CWD.
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )

TMP_DATA=$(mktemp)
# 1) Map each listed environment to the GCS URL of its gopogh summary.
#    Using "|" as the sed delimiter avoids escaping every "/" in the URL (the
#    previous escaped form was missing its closing delimiter, which made sed fail).
# 2) Process the data in each gopogh summary.
# 3) Filter tests to only include failed tests, keeping "environment:testname".
# 4) Sort so the list can be `join`ed below (the previous `sort -t, -k` had a
#    dangling -k with no key specification).
# 5) Store in file $TMP_DATA.
# Note: the command substitution is intentionally unquoted so each URL becomes
# a separate gsutil argument.
gsutil cat $(< "${ENVIRONMENT_LIST}" sed -r "s|^|gs://minikube-builds/logs/${PR_NUMBER}/${SHORT_COMMIT}/|; s|$|_summary.json|") \
  | "$DIR/process_data.sh" \
  | sed -n -r -e "s/[0-9a-f]*,[0-9-]*,([a-zA-Z\/_0-9-]*),([a-zA-Z\/_0-9-]*),Failed,[.0-9]*/\1:\2/p" \
  | sort \
  > "$TMP_DATA"

# Download the precomputed flake rates from the GCS bucket into file $TMP_FLAKE_RATES.
TMP_FLAKE_RATES=$(mktemp)
gsutil cp gs://minikube-flake-rate/flake_rates.csv "$TMP_FLAKE_RATES"

# (Previously "$TMP_FLAKE_RATES\_filtered", which embedded a literal backslash
# in the file name.)
TMP_FAILED_RATES="${TMP_FLAKE_RATES}_filtered"
# 1) Parse the flake rates to only include the environment, test name, and flake rates.
# 2) Sort the flake rates based on environment+test name.
# 3) Join the flake rates with the failing tests to only get flake rates of failing tests.
# 4) Sort failed test flake rates based on the flakiness of that test - stable tests should be first on the list.
# 5) Store in file $TMP_FAILED_RATES.
< "$TMP_FLAKE_RATES" sed -n -r -e "s/([a-zA-Z0-9_-]*),([a-zA-Z\/0-9_-]*),([.0-9]*),[.0-9]*/\1:\2,\3/p" \
  | sort -t, -k1,1 \
  | join -t , -j 1 "$TMP_DATA" - \
  | sort -g -t, -k2,2 \
  > "$TMP_FAILED_RATES"

FAILED_RATES_LINES=$(wc -l < "$TMP_FAILED_RATES")
if [[ "$FAILED_RATES_LINES" -eq 0 ]]; then
  echo "No failed tests! Aborting without commenting..." 1>&2
  exit 0
fi

# Create the comment template. (The old header interpolated an undefined
# $ENVIRONMENT variable, which aborts the script under `set -u`.)
TMP_COMMENT=$(mktemp)
printf "These are the flake rates of all failed tests.\n|Environment|Failed Tests|Flake Rate (%%)|\n|---|---|---|\n" > "$TMP_COMMENT"
# 1) Get the first $MAX_REPORTED_TESTS lines.
# 2) Print a row in the table with the environment, test name, flake rate, and a link to the flake chart for that test.
# 3) Append these rows to file $TMP_COMMENT.
< "$TMP_FAILED_RATES" head -n "$MAX_REPORTED_TESTS" \
  | sed -n -r -e "s/([a-zA-Z\/0-9_-]*):([a-zA-Z\/0-9_-]*),([.0-9]*)/|\1|\2|\3 ([chart](https:\/\/storage.googleapis.com\/minikube-flake-rate\/flake_chart.html?env=\1\&test=\2))|/p" \
  >> "$TMP_COMMENT"

# If there are too many failing tests, add an extra row explaining this, and a message after the table.
# (Use $MAX_REPORTED_TESTS instead of a hardcoded 30, and emit three cells to
# match the three-column table.)
if [[ "$FAILED_RATES_LINES" -gt "$MAX_REPORTED_TESTS" ]]; then
  printf "|More tests...|Continued...|Truncated...|\n\nToo many tests failed - See test logs for more details." >> "$TMP_COMMENT"
fi

# Link the full flake chart of every tested environment. (The old single link
# used the undefined $ENVIRONMENT variable.)
printf "\n\nTo see the flake rates of all tests on an environment, click one of the links below:" >> "$TMP_COMMENT"
while read -r ENVIRONMENT; do
  printf "\n - [%s](https://storage.googleapis.com/minikube-flake-rate/flake_chart.html?env=%s)" "$ENVIRONMENT" "$ENVIRONMENT" >> "$TMP_COMMENT"
done < "$ENVIRONMENT_LIST"

# install gh if not present
"$DIR/../installers/check_install_gh.sh"

gh pr comment "https://github.com/kubernetes/minikube/pull/$PR_NUMBER" --body "$(cat "$TMP_COMMENT")"
|
|
@ -0,0 +1,84 @@
|
|||
#!/bin/bash

# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script is called once per integration test. If all integration tests that
# have registered themselves in the started environment list have also
# registered themselves in the finished environment list, this script reports
# flakes or uploads flakes to flake data.
#
# This script expects the following env variables:
# MINIKUBE_LOCATION: The Github location being run on (e.g. master, 11000).
# COMMIT: Commit hash the tests ran on.
# ROOT_JOB_ID: Job ID to use for synchronization.
# UPSTREAM_JOB: Name of the environment/job registering itself as finished
#               (appended to the finished list below).

# pipefail only at first: the gsutil read just below is allowed to fail (that
# is how "not ready yet / already processed" is detected), so -e/-u are
# deferred until after that check.
set -o pipefail

BUCKET_PATH="gs://minikube-builds/logs/${MINIKUBE_LOCATION}/${COMMIT:0:7}"
STARTED_LIST=$(gsutil cat "${BUCKET_PATH}/started_environments_${ROOT_JOB_ID}.txt" | sort | uniq)

if [ $? -ne 0 ]; then
  echo "Unable to read environment list. Likely being run before all tests are ready or after tests have already been uploaded." 1>&2
  exit 0
fi

set -eu -o pipefail

FINISHED_LIST_REMOTE="${BUCKET_PATH}/finished_environments_${ROOT_JOB_ID}.txt"
# Ensure FINISHED_LIST_REMOTE exists so we can append (but don't erase any existing entries in FINISHED_LIST_REMOTE)
< /dev/null gsutil cp -n - "${FINISHED_LIST_REMOTE}"
# Copy the job name to APPEND_TMP
APPEND_TMP="${BUCKET_PATH}/$(basename $(mktemp))"
echo "${UPSTREAM_JOB}"\
  | gsutil cp - "${APPEND_TMP}"
# Append job name to remote finished list.
# (gsutil compose concatenates objects, so the finished list grows by one entry.)
gsutil compose "${FINISHED_LIST_REMOTE}" "${APPEND_TMP}" "${FINISHED_LIST_REMOTE}"
gsutil rm "${APPEND_TMP}"

# Pull down the deduplicated finished list for comparison with the started list.
FINISHED_LIST=$(mktemp)
gsutil cat "${FINISHED_LIST_REMOTE}"\
  | sort\
  | uniq > "${FINISHED_LIST}"

# An environment counts as finished only if it appears in both lists, hence the join.
STARTED_COUNT=$(echo "${STARTED_LIST}" | wc -l)
FINISHED_COUNT=$(\
  echo "${STARTED_LIST}"\
  | join - "${FINISHED_LIST}"\
  | wc -l)

# Some environment is still running — leave everything in place and try again
# on the next invocation.
if [ ${STARTED_COUNT} -ne ${FINISHED_COUNT} ]; then
  echo "Started environments are not all finished! Started: ${STARTED_LIST}, Finished: $(cat ${FINISHED_LIST}))"
  exit 0
fi

# Prevent other invocations of this script from uploading the same thing multiple times.
gsutil rm "${BUCKET_PATH}/started_environments_${ROOT_JOB_ID}.txt"

# At this point, we know all integration tests are done and we can process all summaries safely.

# Get directory of this script.
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )

# On master, upload flake data; on a PR, comment the flake rates instead.
# `|| true` keeps one bad summary from blocking the remaining uploads.
if [[ "${MINIKUBE_LOCATION}" == "master" ]]; then
  for ENVIRONMENT in ${STARTED_LIST}; do
    SUMMARY="${BUCKET_PATH}/${ENVIRONMENT}_summary.json"
    "${DIR}/upload_tests.sh" "${SUMMARY}" || true
  done
else
  "${DIR}/report_flakes.sh" "${MINIKUBE_LOCATION}" "${COMMIT:0:7}" "${FINISHED_LIST}"
fi

# Clean up the synchronization artifacts.
gsutil rm "${BUCKET_PATH}/finished_environments_${ROOT_JOB_ID}.txt"
rm "${FINISHED_LIST}"
|
|
@ -0,0 +1,44 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2021 The Kubernetes Authors All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Takes a gopogh summary in a GCS bucket, extracts test data as a CSV and
|
||||
# appends to the existing CSV data in the flake rate GCS bucket.
|
||||
# Example usage: ./upload_tests.sh gs://some-bucket/gopogh_summary.json
|
||||
|
||||
set -eu -o pipefail
|
||||
|
||||
if [ "$#" -ne 1 ]; then
|
||||
echo "Wrong number of arguments. Usage: upload_tests.sh <gopogh_summary.json>" 1>&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
TMP_DATA=$(mktemp)
|
||||
|
||||
# Use the gopogh summary, process it, optimize the data, remove the header, and store.
|
||||
gsutil cat "$1" \
|
||||
| ./test-flake-chart/process_data.sh \
|
||||
| ./test-flake-chart/optimize_data.sh \
|
||||
| sed "1d" > $TMP_DATA
|
||||
|
||||
GCS_TMP="gs://minikube-flake-rate/$(basename "$TMP_DATA")"
|
||||
|
||||
# Copy data to append to GCS
|
||||
gsutil cp $TMP_DATA $GCS_TMP
|
||||
# Append data to existing data.
|
||||
gsutil compose gs://minikube-flake-rate/data.csv $GCS_TMP gs://minikube-flake-rate/data.csv
|
||||
# Clear all the temp stuff.
|
||||
rm $TMP_DATA
|
||||
gsutil rm $GCS_TMP
|
|
@ -0,0 +1,21 @@
|
|||
# Copyright 2021 The Kubernetes Authors All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
$test_home="$env:HOMEDRIVE$env:HOMEPATH\minikube-integration"
|
||||
$env:KUBECONFIG="$test_home\kubeconfig"
|
||||
$env:MINIKUBE_HOME="$test_home\.minikube"
|
||||
|
||||
# delete in case previous test was unexpectedly ended and teardown wasn't run
|
||||
rm -r -Force $test_home
|
||||
mkdir -p $test_home
|
|
@ -0,0 +1,17 @@
|
|||
# Copyright 2021 The Kubernetes Authors All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
$test_home="$env:HOMEDRIVE$env:HOMEPATH\minikube-integration"
|
||||
|
||||
rm -r -Force $test_home
|
|
@ -21,6 +21,8 @@ gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/minikube-windows-am
|
|||
gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/e2e-windows-amd64.exe out/
|
||||
gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/testdata .
|
||||
gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/setup_docker_desktop_windows.ps1 out/
|
||||
gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/windows_integration_setup.ps1 out/
|
||||
gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/windows_integration_teardown.ps1 out/
|
||||
|
||||
$env:SHORT_COMMIT=$env:COMMIT.substring(0, 7)
|
||||
$gcs_bucket="minikube-builds/logs/$env:MINIKUBE_LOCATION/$env:SHORT_COMMIT"
|
||||
|
@ -37,12 +39,13 @@ If ($lastexitcode -gt 0) {
|
|||
Exit $lastexitcode
|
||||
}
|
||||
|
||||
# Remove unused images and containers
|
||||
docker system prune --all --force
|
||||
|
||||
|
||||
./out/minikube-windows-amd64.exe delete --all
|
||||
|
||||
# Remove unused images and containers
|
||||
docker system prune --all --force --volumes
|
||||
|
||||
./out/windows_integration_setup.ps1
|
||||
|
||||
docker ps -aq | ForEach -Process {docker rm -fv $_}
|
||||
|
||||
$started=Get-Date -UFormat %s
|
||||
|
@ -85,7 +88,6 @@ gsutil -qm cp testout.json gs://$gcs_bucket/Docker_Windows.json
|
|||
gsutil -qm cp testout.html gs://$gcs_bucket/Docker_Windows.html
|
||||
gsutil -qm cp testout_summary.json gs://$gcs_bucket/Docker_Windows_summary.json
|
||||
|
||||
|
||||
# Update the PR with the new info
|
||||
$json = "{`"state`": `"$env:status`", `"description`": `"Jenkins: $description`", `"target_url`": `"$env:target_url`", `"context`": `"Docker_Windows`"}"
|
||||
Invoke-WebRequest -Uri "https://api.github.com/repos/kubernetes/minikube/statuses/$env:COMMIT`?access_token=$env:access_token" -Body $json -ContentType "application/json" -Method Post -usebasicparsing
|
||||
|
@ -96,4 +98,6 @@ docker system prune --all --force
|
|||
# Just shutdown Docker, it's safer than anything else
|
||||
Get-Process "*Docker Desktop*" | Stop-Process
|
||||
|
||||
./out/windows_integration_teardown.ps1
|
||||
|
||||
Exit $env:result
|
||||
|
|
|
@ -18,9 +18,13 @@ mkdir -p out
|
|||
gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/minikube-windows-amd64.exe out/
|
||||
gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/e2e-windows-amd64.exe out/
|
||||
gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/testdata .
|
||||
gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/windows_integration_setup.ps1 out/
|
||||
gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/windows_integration_teardown.ps1 out/
|
||||
|
||||
./out/minikube-windows-amd64.exe delete --all
|
||||
|
||||
./out/windows_integration_setup.ps1
|
||||
|
||||
out/e2e-windows-amd64.exe -minikube-start-args="--driver=hyperv" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=65m
|
||||
$env:result=$lastexitcode
|
||||
# If the last exit code was 0->success, x>0->error
|
||||
|
@ -33,4 +37,6 @@ $env:target_url="https://storage.googleapis.com/minikube-builds/logs/$env:MINIKU
|
|||
$json = "{`"state`": `"$env:status`", `"description`": `"Jenkins`", `"target_url`": `"$env:target_url`", `"context`": `"Hyper-V_Windows`"}"
|
||||
Invoke-WebRequest -Uri "https://api.github.com/repos/kubernetes/minikube/statuses/$env:COMMIT`?access_token=$env:access_token" -Body $json -ContentType "application/json" -Method Post -usebasicparsing
|
||||
|
||||
Exit $env:result
|
||||
./out/windows_integration_teardown.ps1
|
||||
|
||||
Exit $env:result
|
||||
|
|
|
@ -19,9 +19,13 @@ mkdir -p out
|
|||
gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/minikube-windows-amd64.exe out/
|
||||
gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/e2e-windows-amd64.exe out/
|
||||
gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/testdata .
|
||||
gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/windows_integration_setup.ps1 out/
|
||||
gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/windows_integration_teardown.ps1 out/
|
||||
|
||||
./out/minikube-windows-amd64.exe delete
|
||||
|
||||
./out/windows_integration_setup.ps1
|
||||
|
||||
out/e2e-windows-amd64.exe -minikube-start-args="--driver=virtualbox" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=30m
|
||||
$env:result=$lastexitcode
|
||||
# If the last exit code was 0->success, x>0->error
|
||||
|
@ -34,4 +38,6 @@ $env:target_url="https://storage.googleapis.com/minikube-builds/logs/$env:MINIKU
|
|||
$json = "{`"state`": `"$env:status`", `"description`": `"Jenkins`", `"target_url`": `"$env:target_url`", `"context`": `"VirtualBox_Windows`"}"
|
||||
Invoke-WebRequest -Uri "https://api.github.com/repos/kubernetes/minikube/statuses/$env:COMMIT`?access_token=$env:access_token" -Body $json -ContentType "application/json" -Method Post -usebasicparsing
|
||||
|
||||
Exit $env:result
|
||||
./out/windows_integration_teardown.ps1
|
||||
|
||||
Exit $env:result
|
||||
|
|
|
@ -19,7 +19,7 @@ package main
|
|||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/go-github/github"
|
||||
"github.com/google/go-github/v36/github"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
|
|
@ -26,7 +26,7 @@ import (
|
|||
"golang.org/x/mod/semver"
|
||||
"golang.org/x/oauth2"
|
||||
|
||||
"github.com/google/go-github/v32/github"
|
||||
"github.com/google/go-github/v36/github"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
|
|
|
@ -45,29 +45,44 @@ const (
|
|||
|
||||
var (
|
||||
schema = map[string]update.Item{
|
||||
".github/workflows/iso.yml": {
|
||||
Replace: map[string]string{
|
||||
`go-version: '.*`: `go-version: '{{.StableVersion}}'`,
|
||||
},
|
||||
},
|
||||
".github/workflows/kic_image.yml": {
|
||||
Replace: map[string]string{
|
||||
`go-version: '.*`: `go-version: '{{.StableVersion}}'`,
|
||||
},
|
||||
},
|
||||
".github/workflows/build.yml": {
|
||||
Replace: map[string]string{
|
||||
`go-version: '.*`: `go-version: '{{.StableVersion}}'`,
|
||||
`GO_VERSION: '.*`: `GO_VERSION: '{{.StableVersion}}'`,
|
||||
},
|
||||
},
|
||||
".github/workflows/master.yml": {
|
||||
Replace: map[string]string{
|
||||
`go-version: '.*`: `go-version: '{{.StableVersion}}'`,
|
||||
`GO_VERSION: '.*`: `GO_VERSION: '{{.StableVersion}}'`,
|
||||
},
|
||||
},
|
||||
".github/workflows/pr.yml": {
|
||||
Replace: map[string]string{
|
||||
`go-version: '.*`: `go-version: '{{.StableVersion}}'`,
|
||||
`GO_VERSION: '.*`: `GO_VERSION: '{{.StableVersion}}'`,
|
||||
},
|
||||
},
|
||||
".github/workflows/docs.yml": {
|
||||
Replace: map[string]string{
|
||||
`GO_VERSION: '.*`: `GO_VERSION: '{{.StableVersion}}'`,
|
||||
},
|
||||
},
|
||||
".github/workflows/time-to-k8s.yml": {
|
||||
Replace: map[string]string{
|
||||
`GO_VERSION: '.*`: `GO_VERSION: '{{.StableVersion}}'`,
|
||||
},
|
||||
},
|
||||
".github/workflows/translations.yml": {
|
||||
Replace: map[string]string{
|
||||
`GO_VERSION: '.*`: `GO_VERSION: '{{.StableVersion}}'`,
|
||||
},
|
||||
},
|
||||
".github/workflows/update_k8s_versions.yml": {
|
||||
Replace: map[string]string{
|
||||
`GO_VERSION: '.*`: `GO_VERSION: '{{.StableVersion}}'`,
|
||||
},
|
||||
},
|
||||
".github/workflows/pr_verified.yaml": {
|
||||
Replace: map[string]string{
|
||||
`GO_VERSION: '.*`: `GO_VERSION: '{{.StableVersion}}'`,
|
||||
},
|
||||
},
|
||||
".travis.yml": {
|
||||
|
|
|
@ -0,0 +1,43 @@
|
|||
# Copyright 2019 The Kubernetes Authors All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM ubuntu:20.04
|
||||
|
||||
ARG GO_VERSION
|
||||
|
||||
RUN apt update
|
||||
|
||||
RUN echo "deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports focal main universe multiverse" >> /etc/apt/sources.list && \
|
||||
echo "deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports focal-updates main universe restricted multiverse" >> /etc/apt/sources.list && \
|
||||
dpkg --add-architecture arm64 && \
|
||||
(apt update || true)
|
||||
|
||||
RUN DEBIAN_FRONTEND=noninteractive \
|
||||
apt install \
|
||||
-o APT::Immediate-Configure=false -y \
|
||||
gcc-aarch64-linux-gnu \
|
||||
make \
|
||||
pkg-config \
|
||||
curl \
|
||||
libvirt-dev:arm64 && \
|
||||
dpkg --configure -a
|
||||
|
||||
RUN curl -sSL https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz | tar -C /usr/local -xzf -
|
||||
|
||||
ENV GOPATH /go
|
||||
|
||||
ENV CC=aarch64-linux-gnu-gcc
|
||||
ENV CGO_ENABLED=1
|
||||
ENV PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig
|
||||
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin:/go/bin
|
|
@ -17,7 +17,6 @@ limitations under the License.
|
|||
package addons
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"strconv"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
@ -26,11 +25,9 @@ import (
|
|||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/kubeconfig"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
"k8s.io/minikube/pkg/minikube/reason"
|
||||
"k8s.io/minikube/pkg/minikube/sysinit"
|
||||
)
|
||||
|
||||
|
@ -43,9 +40,6 @@ func enableOrDisableAutoPause(cc *config.ClusterConfig, name string, val string)
|
|||
out.Infof("auto-pause addon is an alpha feature and still in early development. Please file issues to help us make it better.")
|
||||
out.Infof("https://github.com/kubernetes/minikube/labels/co/auto-pause")
|
||||
|
||||
if cc.KubernetesConfig.ContainerRuntime != "docker" || runtime.GOARCH != "amd64" {
|
||||
exit.Message(reason.Usage, `auto-pause currently is only supported on docker runtime and amd64. Track progress of others here: https://github.com/kubernetes/minikube/issues/10601`)
|
||||
}
|
||||
co := mustload.Running(cc.Name)
|
||||
if enable {
|
||||
if err := sysinit.New(co.CP.Runner).EnableNow("auto-pause"); err != nil {
|
||||
|
|
|
@ -24,7 +24,7 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/blang/semver/v4"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
|
|
@ -24,13 +24,13 @@ import (
|
|||
|
||||
const (
|
||||
// Version is the current version of kic
|
||||
Version = "v0.0.22-1620785771-11384"
|
||||
Version = "v0.0.25"
|
||||
// SHA of the kic base image
|
||||
baseImageSHA = "f5844fe35994179bbad8dda27d4912304a2fedccdf0bf93ce8b2ec2b3b83af1c"
|
||||
baseImageSHA = "6f936e3443b95cd918d77623bf7b595653bb382766e280290a02b4a349e88b79"
|
||||
// The name of the GCR kicbase repository
|
||||
gcrRepo = "gcr.io/k8s-minikube/kicbase-builds"
|
||||
gcrRepo = "gcr.io/k8s-minikube/kicbase"
|
||||
// The name of the Dockerhub kicbase repository
|
||||
dockerhubRepo = "kicbase/build"
|
||||
dockerhubRepo = "kicbase/stable"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
|
@ -22,6 +22,7 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/minikube/deploy/addons"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
|
@ -31,10 +32,11 @@ import (
|
|||
|
||||
// Addon is a named list of assets, that can be enabled
|
||||
type Addon struct {
|
||||
Assets []*BinAsset
|
||||
enabled bool
|
||||
addonName string
|
||||
Images map[string]string
|
||||
Assets []*BinAsset
|
||||
enabled bool
|
||||
addonName string
|
||||
Maintainer string
|
||||
Images map[string]string
|
||||
|
||||
// Registries currently only shows the default registry of images
|
||||
Registries map[string]string
|
||||
|
@ -47,11 +49,12 @@ type NetworkInfo struct {
|
|||
}
|
||||
|
||||
// NewAddon creates a new Addon
|
||||
func NewAddon(assets []*BinAsset, enabled bool, addonName string, images map[string]string, registries map[string]string) *Addon {
|
||||
func NewAddon(assets []*BinAsset, enabled bool, addonName string, maintainer string, images map[string]string, registries map[string]string) *Addon {
|
||||
a := &Addon{
|
||||
Assets: assets,
|
||||
enabled: enabled,
|
||||
addonName: addonName,
|
||||
Maintainer: maintainer,
|
||||
Images: images,
|
||||
Registries: registries,
|
||||
}
|
||||
|
@ -79,100 +82,105 @@ func (a *Addon) IsEnabled(cc *config.ClusterConfig) bool {
|
|||
var Addons = map[string]*Addon{
|
||||
"auto-pause": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/auto-pause/auto-pause.yaml.tmpl",
|
||||
addons.AutoPauseAssets,
|
||||
"auto-pause/auto-pause.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"auto-pause.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/auto-pause/auto-pause-hook.yaml.tmpl",
|
||||
addons.AutoPauseAssets,
|
||||
"auto-pause/auto-pause-hook.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"auto-pause-hook.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/auto-pause/haproxy.cfg.tmpl",
|
||||
addons.AutoPauseAssets,
|
||||
"auto-pause/haproxy.cfg.tmpl",
|
||||
vmpath.GuestPersistentDir,
|
||||
"haproxy.cfg",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/auto-pause/unpause.lua",
|
||||
addons.AutoPauseAssets,
|
||||
"auto-pause/unpause.lua",
|
||||
vmpath.GuestPersistentDir,
|
||||
"unpause.lua",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/auto-pause/auto-pause.service",
|
||||
addons.AutoPauseAssets,
|
||||
"auto-pause/auto-pause.service.tmpl",
|
||||
"/etc/systemd/system/",
|
||||
"auto-pause.service",
|
||||
"0640"),
|
||||
|
||||
// GuestPersistentDir
|
||||
}, false, "auto-pause", map[string]string{
|
||||
}, false, "auto-pause", "google", map[string]string{
|
||||
"AutoPauseHook": "k8s-minikube/auto-pause-hook:v0.0.2@sha256:c76be418df5ca9c66d0d11c2c68461acbf4072c1cdfc17e64729c5ef4d5a4128",
|
||||
}, map[string]string{
|
||||
"AutoPauseHook": "gcr.io",
|
||||
}),
|
||||
"dashboard": NewAddon([]*BinAsset{
|
||||
// We want to create the kubernetes-dashboard ns first so that every subsequent object can be created
|
||||
MustBinAsset("deploy/addons/dashboard/dashboard-ns.yaml", vmpath.GuestAddonsDir, "dashboard-ns.yaml", "0640"),
|
||||
MustBinAsset("deploy/addons/dashboard/dashboard-clusterrole.yaml", vmpath.GuestAddonsDir, "dashboard-clusterrole.yaml", "0640"),
|
||||
MustBinAsset("deploy/addons/dashboard/dashboard-clusterrolebinding.yaml", vmpath.GuestAddonsDir, "dashboard-clusterrolebinding.yaml", "0640"),
|
||||
MustBinAsset("deploy/addons/dashboard/dashboard-configmap.yaml", vmpath.GuestAddonsDir, "dashboard-configmap.yaml", "0640"),
|
||||
MustBinAsset("deploy/addons/dashboard/dashboard-dp.yaml.tmpl", vmpath.GuestAddonsDir, "dashboard-dp.yaml", "0640"),
|
||||
MustBinAsset("deploy/addons/dashboard/dashboard-role.yaml", vmpath.GuestAddonsDir, "dashboard-role.yaml", "0640"),
|
||||
MustBinAsset("deploy/addons/dashboard/dashboard-rolebinding.yaml", vmpath.GuestAddonsDir, "dashboard-rolebinding.yaml", "0640"),
|
||||
MustBinAsset("deploy/addons/dashboard/dashboard-sa.yaml", vmpath.GuestAddonsDir, "dashboard-sa.yaml", "0640"),
|
||||
MustBinAsset("deploy/addons/dashboard/dashboard-secret.yaml", vmpath.GuestAddonsDir, "dashboard-secret.yaml", "0640"),
|
||||
MustBinAsset("deploy/addons/dashboard/dashboard-svc.yaml", vmpath.GuestAddonsDir, "dashboard-svc.yaml", "0640"),
|
||||
}, false, "dashboard", map[string]string{
|
||||
MustBinAsset(addons.DashboardAssets, "dashboard/dashboard-ns.yaml", vmpath.GuestAddonsDir, "dashboard-ns.yaml", "0640"),
|
||||
MustBinAsset(addons.DashboardAssets, "dashboard/dashboard-clusterrole.yaml", vmpath.GuestAddonsDir, "dashboard-clusterrole.yaml", "0640"),
|
||||
MustBinAsset(addons.DashboardAssets, "dashboard/dashboard-clusterrolebinding.yaml", vmpath.GuestAddonsDir, "dashboard-clusterrolebinding.yaml", "0640"),
|
||||
MustBinAsset(addons.DashboardAssets, "dashboard/dashboard-configmap.yaml", vmpath.GuestAddonsDir, "dashboard-configmap.yaml", "0640"),
|
||||
MustBinAsset(addons.DashboardAssets, "dashboard/dashboard-dp.yaml.tmpl", vmpath.GuestAddonsDir, "dashboard-dp.yaml", "0640"),
|
||||
MustBinAsset(addons.DashboardAssets, "dashboard/dashboard-role.yaml", vmpath.GuestAddonsDir, "dashboard-role.yaml", "0640"),
|
||||
MustBinAsset(addons.DashboardAssets, "dashboard/dashboard-rolebinding.yaml", vmpath.GuestAddonsDir, "dashboard-rolebinding.yaml", "0640"),
|
||||
MustBinAsset(addons.DashboardAssets, "dashboard/dashboard-sa.yaml", vmpath.GuestAddonsDir, "dashboard-sa.yaml", "0640"),
|
||||
MustBinAsset(addons.DashboardAssets, "dashboard/dashboard-secret.yaml", vmpath.GuestAddonsDir, "dashboard-secret.yaml", "0640"),
|
||||
MustBinAsset(addons.DashboardAssets, "dashboard/dashboard-svc.yaml", vmpath.GuestAddonsDir, "dashboard-svc.yaml", "0640"),
|
||||
}, false, "dashboard", "kubernetes", map[string]string{
|
||||
"Dashboard": "kubernetesui/dashboard:v2.1.0@sha256:7f80b5ba141bead69c4fee8661464857af300d7d7ed0274cf7beecedc00322e6",
|
||||
"MetricsScraper": "kubernetesui/metrics-scraper:v1.0.4@sha256:555981a24f184420f3be0c79d4efb6c948a85cfce84034f85a563f4151a81cbf",
|
||||
}, nil),
|
||||
"default-storageclass": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/storageclass/storageclass.yaml.tmpl",
|
||||
MustBinAsset(addons.DefaultStorageClassAssets,
|
||||
"storageclass/storageclass.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"storageclass.yaml",
|
||||
"0640"),
|
||||
}, true, "default-storageclass", nil, nil),
|
||||
}, true, "default-storageclass", "kubernetes", nil, nil),
|
||||
"pod-security-policy": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/pod-security-policy/pod-security-policy.yaml.tmpl",
|
||||
MustBinAsset(addons.PodSecurityPolicyAssets,
|
||||
"pod-security-policy/pod-security-policy.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"pod-security-policy.yaml",
|
||||
"0640"),
|
||||
}, false, "pod-security-policy", nil, nil),
|
||||
}, false, "pod-security-policy", "", nil, nil),
|
||||
"storage-provisioner": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/storage-provisioner/storage-provisioner.yaml.tmpl",
|
||||
MustBinAsset(addons.StorageProvisionerAssets,
|
||||
"storage-provisioner/storage-provisioner.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"storage-provisioner.yaml",
|
||||
"0640"),
|
||||
}, true, "storage-provisioner", map[string]string{
|
||||
}, true, "storage-provisioner", "kubernetes", map[string]string{
|
||||
"StorageProvisioner": fmt.Sprintf("k8s-minikube/storage-provisioner:%s", version.GetStorageProvisionerVersion()),
|
||||
}, map[string]string{
|
||||
"StorageProvisioner": "gcr.io",
|
||||
}),
|
||||
"storage-provisioner-gluster": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/storage-provisioner-gluster/storage-gluster-ns.yaml.tmpl",
|
||||
MustBinAsset(addons.StorageProvisionerGlusterAssets,
|
||||
"storage-provisioner-gluster/storage-gluster-ns.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"storage-gluster-ns.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/storage-provisioner-gluster/glusterfs-daemonset.yaml.tmpl",
|
||||
MustBinAsset(addons.StorageProvisionerGlusterAssets,
|
||||
"storage-provisioner-gluster/glusterfs-daemonset.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"glusterfs-daemonset.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/storage-provisioner-gluster/heketi-deployment.yaml.tmpl",
|
||||
MustBinAsset(addons.StorageProvisionerGlusterAssets,
|
||||
"storage-provisioner-gluster/heketi-deployment.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"heketi-deployment.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/storage-provisioner-gluster/storage-provisioner-glusterfile.yaml.tmpl",
|
||||
MustBinAsset(addons.StorageProvisionerGlusterAssets,
|
||||
"storage-provisioner-gluster/storage-provisioner-glusterfile.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"storage-privisioner-glusterfile.yaml",
|
||||
"0640"),
|
||||
}, false, "storage-provisioner-gluster", map[string]string{
|
||||
}, false, "storage-provisioner-gluster", "", map[string]string{
|
||||
"Heketi": "heketi/heketi:10@sha256:76d5a6a3b7cf083d1e99efa1c15abedbc5c8b73bef3ade299ce9a4c16c9660f8",
|
||||
"GlusterfileProvisioner": "gluster/glusterfile-provisioner:latest@sha256:9961a35cb3f06701958e202324141c30024b195579e5eb1704599659ddea5223",
|
||||
"GlusterfsServer": "nixpanic/glusterfs-server:pr_fake-disk@sha256:3c58ae9d4e2007758954879d3f4095533831eb757c64ca6a0e32d1fc53fb6034",
|
||||
|
@ -180,37 +188,37 @@ var Addons = map[string]*Addon{
|
|||
"GlusterfsServer": "quay.io",
|
||||
}),
|
||||
"efk": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/efk/elasticsearch-rc.yaml.tmpl",
|
||||
MustBinAsset(addons.EfkAssets,
|
||||
"efk/elasticsearch-rc.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"elasticsearch-rc.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/efk/elasticsearch-svc.yaml.tmpl",
|
||||
MustBinAsset(addons.EfkAssets,
|
||||
"efk/elasticsearch-svc.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"elasticsearch-svc.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/efk/fluentd-es-rc.yaml.tmpl",
|
||||
MustBinAsset(addons.EfkAssets,
|
||||
"efk/fluentd-es-rc.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"fluentd-es-rc.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/efk/fluentd-es-configmap.yaml.tmpl",
|
||||
MustBinAsset(addons.EfkAssets,
|
||||
"efk/fluentd-es-configmap.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"fluentd-es-configmap.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/efk/kibana-rc.yaml.tmpl",
|
||||
MustBinAsset(addons.EfkAssets,
|
||||
"efk/kibana-rc.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"kibana-rc.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/efk/kibana-svc.yaml.tmpl",
|
||||
MustBinAsset(addons.EfkAssets,
|
||||
"efk/kibana-svc.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"kibana-svc.yaml",
|
||||
"0640"),
|
||||
}, false, "efk", map[string]string{
|
||||
}, false, "efk", "", map[string]string{
|
||||
"Elasticsearch": "elasticsearch:v5.6.2@sha256:7e95b32a7a2aad0c0db5c881e4a1ce8b7e53236144ae9d9cfb5fbe5608af4ab2",
|
||||
"FluentdElasticsearch": "fluentd-elasticsearch:v2.0.2@sha256:d0480bbf2d0de2344036fa3f7034cf7b4b98025a89c71d7f1f1845ac0e7d5a97",
|
||||
"Alpine": "alpine:3.6@sha256:66790a2b79e1ea3e1dabac43990c54aca5d1ddf268d9a5a0285e4167c8b24475",
|
||||
|
@ -221,22 +229,22 @@ var Addons = map[string]*Addon{
|
|||
"Kibana": "docker.elastic.co",
|
||||
}),
|
||||
"ingress": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/ingress/ingress-configmap.yaml.tmpl",
|
||||
MustBinAsset(addons.IngressAssets,
|
||||
"ingress/ingress-configmap.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"ingress-configmap.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/ingress/ingress-rbac.yaml.tmpl",
|
||||
MustBinAsset(addons.IngressAssets,
|
||||
"ingress/ingress-rbac.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"ingress-rbac.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/ingress/ingress-dp.yaml.tmpl",
|
||||
MustBinAsset(addons.IngressAssets,
|
||||
"ingress/ingress-dp.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"ingress-dp.yaml",
|
||||
"0640"),
|
||||
}, false, "ingress", map[string]string{
|
||||
}, false, "ingress", "", map[string]string{
|
||||
"IngressController": "ingress-nginx/controller:v0.44.0@sha256:3dd0fac48073beaca2d67a78c746c7593f9c575168a17139a9955a82c63c4b9a",
|
||||
"KubeWebhookCertgenCreate": "docker.io/jettech/kube-webhook-certgen:v1.5.1@sha256:950833e19ade18cd389d647efb88992a7cc077abedef343fa59e012d376d79b7",
|
||||
"KubeWebhookCertgenPatch": "docker.io/jettech/kube-webhook-certgen:v1.5.1@sha256:950833e19ade18cd389d647efb88992a7cc077abedef343fa59e012d376d79b7",
|
||||
|
@ -244,68 +252,68 @@ var Addons = map[string]*Addon{
|
|||
"IngressController": "k8s.gcr.io",
|
||||
}),
|
||||
"istio-provisioner": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/istio-provisioner/istio-operator.yaml.tmpl",
|
||||
MustBinAsset(addons.IstioProvisionerAssets,
|
||||
"istio-provisioner/istio-operator.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"istio-operator.yaml",
|
||||
"0640"),
|
||||
}, false, "istio-provisioner", map[string]string{
|
||||
}, false, "istio-provisioner", "", map[string]string{
|
||||
"IstioOperator": "istio/operator:1.5.0@sha256:25a6398ed4996a5313767ceb63768d503c266f63506ad3074b30eef6b5b5167e",
|
||||
}, nil),
|
||||
"istio": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/istio/istio-default-profile.yaml.tmpl",
|
||||
MustBinAsset(addons.IstioAssets,
|
||||
"istio/istio-default-profile.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"istio-default-profile.yaml",
|
||||
"0640"),
|
||||
}, false, "istio", nil, nil),
|
||||
}, false, "istio", "", nil, nil),
|
||||
"kubevirt": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/kubevirt/pod.yaml.tmpl",
|
||||
MustBinAsset(addons.KubevirtAssets,
|
||||
"kubevirt/pod.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"pod.yaml",
|
||||
"0640"),
|
||||
}, false, "kubevirt", map[string]string{
|
||||
}, false, "kubevirt", "", map[string]string{
|
||||
"Kubectl": "bitnami/kubectl:1.17@sha256:de642e973d3d0ef60e4d0a1f92286a9fdae245535c5990d4762bbe86fcf95887",
|
||||
}, nil),
|
||||
"metrics-server": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/metrics-server/metrics-apiservice.yaml.tmpl",
|
||||
MustBinAsset(addons.MetricsServerAssets,
|
||||
"metrics-server/metrics-apiservice.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"metrics-apiservice.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/metrics-server/metrics-server-deployment.yaml.tmpl",
|
||||
MustBinAsset(addons.MetricsServerAssets,
|
||||
"metrics-server/metrics-server-deployment.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"metrics-server-deployment.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/metrics-server/metrics-server-rbac.yaml.tmpl",
|
||||
MustBinAsset(addons.MetricsServerAssets,
|
||||
"metrics-server/metrics-server-rbac.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"metrics-server-rbac.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/metrics-server/metrics-server-service.yaml.tmpl",
|
||||
MustBinAsset(addons.MetricsServerAssets,
|
||||
"metrics-server/metrics-server-service.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"metrics-server-service.yaml",
|
||||
"0640"),
|
||||
}, false, "metrics-server", map[string]string{
|
||||
}, false, "metrics-server", "kubernetes", map[string]string{
|
||||
"MetricsServer": "metrics-server/metrics-server:v0.4.2@sha256:dbc33d7d35d2a9cc5ab402005aa7a0d13be6192f3550c7d42cba8d2d5e3a5d62",
|
||||
}, map[string]string{
|
||||
"MetricsServer": "k8s.gcr.io",
|
||||
}),
|
||||
"olm": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/olm/crds.yaml.tmpl",
|
||||
MustBinAsset(addons.OlmAssets,
|
||||
"olm/crds.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"crds.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/olm/olm.yaml.tmpl",
|
||||
MustBinAsset(addons.OlmAssets,
|
||||
"olm/olm.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"olm.yaml",
|
||||
"0640"),
|
||||
}, false, "olm", map[string]string{
|
||||
}, false, "olm", "", map[string]string{
|
||||
"OLM": "operator-framework/olm:v0.17.0@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607",
|
||||
"UpstreamCommunityOperators": "operator-framework/upstream-community-operators:07bbc13@sha256:cc7b3fdaa1ccdea5866fcd171669dc0ed88d3477779d8ed32e3712c827e38cc0",
|
||||
}, map[string]string{
|
||||
|
@ -313,63 +321,63 @@ var Addons = map[string]*Addon{
|
|||
"UpstreamCommunityOperators": "quay.io",
|
||||
}),
|
||||
"registry": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/registry/registry-rc.yaml.tmpl",
|
||||
MustBinAsset(addons.RegistryAssets,
|
||||
"registry/registry-rc.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"registry-rc.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/registry/registry-svc.yaml.tmpl",
|
||||
MustBinAsset(addons.RegistryAssets,
|
||||
"registry/registry-svc.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"registry-svc.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/registry/registry-proxy.yaml.tmpl",
|
||||
MustBinAsset(addons.RegistryAssets,
|
||||
"registry/registry-proxy.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"registry-proxy.yaml",
|
||||
"0640"),
|
||||
}, false, "registry", map[string]string{
|
||||
}, false, "registry", "google", map[string]string{
|
||||
"Registry": "registry:2.7.1@sha256:d5459fcb27aecc752520df4b492b08358a1912fcdfa454f7d2101d4b09991daa",
|
||||
"KubeRegistryProxy": "google_containers/kube-registry-proxy:0.4@sha256:1040f25a5273de0d72c54865a8efd47e3292de9fb8e5353e3fa76736b854f2da",
|
||||
}, map[string]string{
|
||||
"KubeRegistryProxy": "gcr.io",
|
||||
}),
|
||||
"registry-creds": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/registry-creds/registry-creds-rc.yaml.tmpl",
|
||||
MustBinAsset(addons.RegistryCredsAssets,
|
||||
"registry-creds/registry-creds-rc.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"registry-creds-rc.yaml",
|
||||
"0640"),
|
||||
}, false, "registry-creds", map[string]string{
|
||||
}, false, "registry-creds", "", map[string]string{
|
||||
"RegistryCreds": "upmcenterprises/registry-creds:1.10@sha256:93a633d4f2b76a1c66bf19c664dbddc56093a543de6d54320f19f585ccd7d605",
|
||||
}, nil),
|
||||
"registry-aliases": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/registry-aliases/registry-aliases-sa.tmpl",
|
||||
MustBinAsset(addons.RegistryAliasesAssets,
|
||||
"registry-aliases/registry-aliases-sa.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"registry-aliases-sa.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/registry-aliases/registry-aliases-sa-crb.tmpl",
|
||||
MustBinAsset(addons.RegistryAliasesAssets,
|
||||
"registry-aliases/registry-aliases-sa-crb.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"registry-aliases-sa-crb.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/registry-aliases/registry-aliases-config.tmpl",
|
||||
MustBinAsset(addons.RegistryAliasesAssets,
|
||||
"registry-aliases/registry-aliases-config.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"registry-aliases-config.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/registry-aliases/node-etc-hosts-update.tmpl",
|
||||
MustBinAsset(addons.RegistryAliasesAssets,
|
||||
"registry-aliases/node-etc-hosts-update.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"node-etc-hosts-update.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/registry-aliases/patch-coredns-job.tmpl",
|
||||
MustBinAsset(addons.RegistryAliasesAssets,
|
||||
"registry-aliases/patch-coredns-job.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"patch-coredns-job.yaml",
|
||||
"0640"),
|
||||
}, false, "registry-aliases", map[string]string{
|
||||
}, false, "registry-aliases", "", map[string]string{
|
||||
"CoreDNSPatcher": "rhdevelopers/core-dns-patcher@sha256:9220ff32f690c3d889a52afb59ca6fcbbdbd99e5370550cc6fd249adea8ed0a9",
|
||||
"Alpine": "alpine:3.11@sha256:0bd0e9e03a022c3b0226667621da84fc9bf562a9056130424b5bfbd8bcb0397f",
|
||||
"Pause": "google_containers/pause:3.1@sha256:f78411e19d84a252e53bff71a4407a5686c46983a2c2eeed83929b888179acea",
|
||||
|
@ -378,23 +386,23 @@ var Addons = map[string]*Addon{
|
|||
"Pause": "gcr.io",
|
||||
}),
|
||||
"freshpod": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/freshpod/freshpod-rc.yaml.tmpl",
|
||||
MustBinAsset(addons.FreshpodAssets,
|
||||
"freshpod/freshpod-rc.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"freshpod-rc.yaml",
|
||||
"0640"),
|
||||
}, false, "freshpod", map[string]string{
|
||||
}, false, "freshpod", "google", map[string]string{
|
||||
"FreshPod": "google-samples/freshpod:v0.0.1@sha256:b9efde5b509da3fd2959519c4147b653d0c5cefe8a00314e2888e35ecbcb46f9",
|
||||
}, map[string]string{
|
||||
"FreshPod": "gcr.io",
|
||||
}),
|
||||
"nvidia-driver-installer": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/gpu/nvidia-driver-installer.yaml.tmpl",
|
||||
MustBinAsset(addons.NvidiaDriverInstallerAssets,
|
||||
"gpu/nvidia-driver-installer.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"nvidia-driver-installer.yaml",
|
||||
"0640"),
|
||||
}, false, "nvidia-driver-installer", map[string]string{
|
||||
}, false, "nvidia-driver-installer", "google", map[string]string{
|
||||
"NvidiaDriverInstaller": "minikube-nvidia-driver-installer:e2d9b43228decf5d6f7dce3f0a85d390f138fa01",
|
||||
"Pause": "pause:2.0@sha256:9ce5316f9752b8347484ab0f6778573af15524124d52b93230b9a0dcc987e73e",
|
||||
}, map[string]string{
|
||||
|
@ -402,242 +410,242 @@ var Addons = map[string]*Addon{
|
|||
"Pause": "k8s.gcr.io",
|
||||
}),
|
||||
"nvidia-gpu-device-plugin": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/gpu/nvidia-gpu-device-plugin.yaml.tmpl",
|
||||
MustBinAsset(addons.NvidiaGpuDevicePluginAssets,
|
||||
"gpu/nvidia-gpu-device-plugin.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"nvidia-gpu-device-plugin.yaml",
|
||||
"0640"),
|
||||
}, false, "nvidia-gpu-device-plugin", map[string]string{
|
||||
}, false, "nvidia-gpu-device-plugin", "", map[string]string{
|
||||
"NvidiaDevicePlugin": "nvidia/k8s-device-plugin:1.0.0-beta4@sha256:94d46bf513cbc43c4d77a364e4bbd409d32d89c8e686e12551cc3eb27c259b90",
|
||||
}, nil),
|
||||
"logviewer": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/logviewer/logviewer-dp-and-svc.yaml.tmpl",
|
||||
MustBinAsset(addons.LogviewerAssets,
|
||||
"logviewer/logviewer-dp-and-svc.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"logviewer-dp-and-svc.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/logviewer/logviewer-rbac.yaml.tmpl",
|
||||
MustBinAsset(addons.LogviewerAssets,
|
||||
"logviewer/logviewer-rbac.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"logviewer-rbac.yaml",
|
||||
"0640"),
|
||||
}, false, "logviewer", map[string]string{
|
||||
}, false, "logviewer", "google", map[string]string{
|
||||
"LogViewer": "ivans3/minikube-log-viewer:latest@sha256:75854f45305cc47d17b04c6c588fa60777391761f951e3a34161ddf1f1b06405",
|
||||
}, nil),
|
||||
"gvisor": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/gvisor/gvisor-pod.yaml.tmpl",
|
||||
MustBinAsset(addons.GvisorAssets,
|
||||
"gvisor/gvisor-pod.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"gvisor-pod.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/gvisor/gvisor-runtimeclass.yaml.tmpl",
|
||||
MustBinAsset(addons.GvisorAssets,
|
||||
"gvisor/gvisor-runtimeclass.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"gvisor-runtimeclass.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/gvisor/gvisor-config.toml",
|
||||
MustBinAsset(addons.GvisorAssets,
|
||||
"gvisor/gvisor-config.toml",
|
||||
vmpath.GuestGvisorDir,
|
||||
constants.GvisorConfigTomlTargetName,
|
||||
"0640"),
|
||||
}, false, "gvisor", map[string]string{
|
||||
}, false, "gvisor", "google", map[string]string{
|
||||
"GvisorAddon": "k8s-minikube/gvisor-addon:3@sha256:23eb17d48a66fc2b09c31454fb54ecae520c3e9c9197ef17fcb398b4f31d505a",
|
||||
}, map[string]string{
|
||||
"GvisorAddon": "gcr.io",
|
||||
}),
|
||||
"helm-tiller": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/helm-tiller/helm-tiller-dp.tmpl",
|
||||
MustBinAsset(addons.HelmTillerAssets,
|
||||
"helm-tiller/helm-tiller-dp.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"helm-tiller-dp.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/helm-tiller/helm-tiller-rbac.tmpl",
|
||||
MustBinAsset(addons.HelmTillerAssets,
|
||||
"helm-tiller/helm-tiller-rbac.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"helm-tiller-rbac.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/helm-tiller/helm-tiller-svc.tmpl",
|
||||
MustBinAsset(addons.HelmTillerAssets,
|
||||
"helm-tiller/helm-tiller-svc.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"helm-tiller-svc.yaml",
|
||||
"0640"),
|
||||
}, false, "helm-tiller", map[string]string{
|
||||
}, false, "helm-tiller", "", map[string]string{
|
||||
"Tiller": "kubernetes-helm/tiller:v2.16.12@sha256:6003775d503546087266eda39418d221f9afb5ccfe35f637c32a1161619a3f9c",
|
||||
}, map[string]string{
|
||||
"Tiller": "gcr.io",
|
||||
}),
|
||||
"ingress-dns": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/ingress-dns/ingress-dns-pod.yaml.tmpl",
|
||||
MustBinAsset(addons.IngressDNSAssets,
|
||||
"ingress-dns/ingress-dns-pod.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"ingress-dns-pod.yaml",
|
||||
"0640"),
|
||||
}, false, "ingress-dns", map[string]string{
|
||||
}, false, "ingress-dns", "", map[string]string{
|
||||
"IngressDNS": "cryptexlabs/minikube-ingress-dns:0.3.0@sha256:e252d2a4c704027342b303cc563e95d2e71d2a0f1404f55d676390e28d5093ab",
|
||||
}, nil),
|
||||
"metallb": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/metallb/metallb.yaml.tmpl",
|
||||
MustBinAsset(addons.MetallbAssets,
|
||||
"metallb/metallb.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"metallb.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/metallb/metallb-config.yaml.tmpl",
|
||||
MustBinAsset(addons.MetallbAssets,
|
||||
"metallb/metallb-config.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"metallb-config.yaml",
|
||||
"0640"),
|
||||
}, false, "metallb", map[string]string{
|
||||
}, false, "metallb", "", map[string]string{
|
||||
"Speaker": "metallb/speaker:v0.9.6@sha256:c66585a805bed1a3b829d8fb4a4aab9d87233497244ebff96f1b88f1e7f8f991",
|
||||
"Controller": "metallb/controller:v0.9.6@sha256:fbfdb9d3f55976b0ee38f3309d83a4ca703efcf15d6ca7889cd8189142286502",
|
||||
}, nil),
|
||||
"ambassador": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/ambassador/ambassador-operator-crds.yaml.tmpl",
|
||||
MustBinAsset(addons.AmbassadorAssets,
|
||||
"ambassador/ambassador-operator-crds.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"ambassador-operator-crds.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/ambassador/ambassador-operator.yaml.tmpl",
|
||||
MustBinAsset(addons.AmbassadorAssets,
|
||||
"ambassador/ambassador-operator.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"ambassador-operator.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/ambassador/ambassadorinstallation.yaml.tmpl",
|
||||
MustBinAsset(addons.AmbassadorAssets,
|
||||
"ambassador/ambassadorinstallation.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"ambassadorinstallation.yaml",
|
||||
"0640"),
|
||||
}, false, "ambassador", map[string]string{
|
||||
}, false, "ambassador", "", map[string]string{
|
||||
"AmbassadorOperator": "datawire/ambassador-operator:v1.2.3@sha256:492f33e0828a371aa23331d75c11c251b21499e31287f026269e3f6ec6da34ed",
|
||||
}, map[string]string{
|
||||
"AmbassadorOperator": "quay.io",
|
||||
}),
|
||||
"gcp-auth": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/gcp-auth/gcp-auth-ns.yaml.tmpl",
|
||||
MustBinAsset(addons.GcpAuthAssets,
|
||||
"gcp-auth/gcp-auth-ns.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"gcp-auth-ns.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/gcp-auth/gcp-auth-service.yaml.tmpl",
|
||||
MustBinAsset(addons.GcpAuthAssets,
|
||||
"gcp-auth/gcp-auth-service.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"gcp-auth-service.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/gcp-auth/gcp-auth-webhook.yaml.tmpl.tmpl",
|
||||
MustBinAsset(addons.GcpAuthAssets,
|
||||
"gcp-auth/gcp-auth-webhook.yaml.tmpl.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"gcp-auth-webhook.yaml",
|
||||
"0640"),
|
||||
}, false, "gcp-auth", map[string]string{
|
||||
}, false, "gcp-auth", "google", map[string]string{
|
||||
"KubeWebhookCertgen": "jettech/kube-webhook-certgen:v1.3.0@sha256:ff01fba91131ed260df3f3793009efbf9686f5a5ce78a85f81c386a4403f7689",
|
||||
"GCPAuthWebhook": "k8s-minikube/gcp-auth-webhook:v0.0.5@sha256:4da26a6937e876c80642c98fed9efb2269a5d2cb55029de9e2685c9fd6bc1add",
|
||||
"GCPAuthWebhook": "k8s-minikube/gcp-auth-webhook:v0.0.6@sha256:c407ad6ee97d8a0e8a21c713e2d9af66aaf73315e4a123874c00b786f962f3cd",
|
||||
}, map[string]string{
|
||||
"GCPAuthWebhook": "gcr.io",
|
||||
}),
|
||||
"volumesnapshots": NewAddon([]*BinAsset{
|
||||
// make sure the order of apply. `csi-hostpath-snapshotclass` must be the first position, because it depends on `snapshot.storage.k8s.io_volumesnapshotclasses`
|
||||
// if user disable volumesnapshots addon and delete `csi-hostpath-snapshotclass` after `snapshot.storage.k8s.io_volumesnapshotclasses`, kubernetes will return the error
|
||||
MustBinAsset(
|
||||
"deploy/addons/volumesnapshots/csi-hostpath-snapshotclass.yaml.tmpl",
|
||||
MustBinAsset(addons.VolumeSnapshotsAssets,
|
||||
"volumesnapshots/csi-hostpath-snapshotclass.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"csi-hostpath-snapshotclass.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml.tmpl",
|
||||
MustBinAsset(addons.VolumeSnapshotsAssets,
|
||||
"volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"snapshot.storage.k8s.io_volumesnapshotclasses.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml.tmpl",
|
||||
MustBinAsset(addons.VolumeSnapshotsAssets,
|
||||
"volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"snapshot.storage.k8s.io_volumesnapshotcontents.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml.tmpl",
|
||||
MustBinAsset(addons.VolumeSnapshotsAssets,
|
||||
"volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"snapshot.storage.k8s.io_volumesnapshots.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/volumesnapshots/rbac-volume-snapshot-controller.yaml.tmpl",
|
||||
MustBinAsset(addons.VolumeSnapshotsAssets,
|
||||
"volumesnapshots/rbac-volume-snapshot-controller.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"rbac-volume-snapshot-controller.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/volumesnapshots/volume-snapshot-controller-deployment.yaml.tmpl",
|
||||
MustBinAsset(addons.VolumeSnapshotsAssets,
|
||||
"volumesnapshots/volume-snapshot-controller-deployment.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"volume-snapshot-controller-deployment.yaml",
|
||||
"0640"),
|
||||
}, false, "volumesnapshots", map[string]string{
|
||||
}, false, "volumesnapshots", "kubernetes", map[string]string{
|
||||
"SnapshotController": "sig-storage/snapshot-controller:v4.0.0@sha256:00fcc441ea9f72899c25eed61d602272a2a58c5f0014332bdcb5ac24acef08e4",
|
||||
}, map[string]string{
|
||||
"SnapshotController": "k8s.gcr.io",
|
||||
}),
|
||||
"csi-hostpath-driver": NewAddon([]*BinAsset{
|
||||
MustBinAsset(
|
||||
"deploy/addons/csi-hostpath-driver/rbac/rbac-external-attacher.yaml.tmpl",
|
||||
MustBinAsset(addons.CsiHostpathDriverAssets,
|
||||
"csi-hostpath-driver/rbac/rbac-external-attacher.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"rbac-external-attacher.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/csi-hostpath-driver/rbac/rbac-external-health-monitor-agent.yaml.tmpl",
|
||||
MustBinAsset(addons.CsiHostpathDriverAssets,
|
||||
"csi-hostpath-driver/rbac/rbac-external-health-monitor-agent.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"rbac-external-health-monitor-agent.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/csi-hostpath-driver/rbac/rbac-external-health-monitor-controller.yaml.tmpl",
|
||||
MustBinAsset(addons.CsiHostpathDriverAssets,
|
||||
"csi-hostpath-driver/rbac/rbac-external-health-monitor-controller.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"rbac-external-health-monitor-controller.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/csi-hostpath-driver/rbac/rbac-external-provisioner.yaml.tmpl",
|
||||
MustBinAsset(addons.CsiHostpathDriverAssets,
|
||||
"csi-hostpath-driver/rbac/rbac-external-provisioner.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"rbac-external-provisioner.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/csi-hostpath-driver/rbac/rbac-external-resizer.yaml.tmpl",
|
||||
MustBinAsset(addons.CsiHostpathDriverAssets,
|
||||
"csi-hostpath-driver/rbac/rbac-external-resizer.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"rbac-external-resizer.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml.tmpl",
|
||||
MustBinAsset(addons.CsiHostpathDriverAssets,
|
||||
"csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"rbac-external-snapshotter.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-attacher.yaml.tmpl",
|
||||
MustBinAsset(addons.CsiHostpathDriverAssets,
|
||||
"csi-hostpath-driver/deploy/csi-hostpath-attacher.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"csi-hostpath-attacher.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml.tmpl",
|
||||
MustBinAsset(addons.CsiHostpathDriverAssets,
|
||||
"csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"csi-hostpath-driverinfo.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-plugin.yaml.tmpl",
|
||||
MustBinAsset(addons.CsiHostpathDriverAssets,
|
||||
"csi-hostpath-driver/deploy/csi-hostpath-plugin.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"csi-hostpath-plugin.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-provisioner.yaml.tmpl",
|
||||
MustBinAsset(addons.CsiHostpathDriverAssets,
|
||||
"csi-hostpath-driver/deploy/csi-hostpath-provisioner.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"csi-hostpath-provisioner.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-resizer.yaml.tmpl",
|
||||
MustBinAsset(addons.CsiHostpathDriverAssets,
|
||||
"csi-hostpath-driver/deploy/csi-hostpath-resizer.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"csi-hostpath-resizer.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-snapshotter.yaml.tmpl",
|
||||
MustBinAsset(addons.CsiHostpathDriverAssets,
|
||||
"csi-hostpath-driver/deploy/csi-hostpath-snapshotter.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"csi-hostpath-snapshotter.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(
|
||||
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml.tmpl",
|
||||
MustBinAsset(addons.CsiHostpathDriverAssets,
|
||||
"csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"csi-hostpath-storageclass.yaml",
|
||||
"0640"),
|
||||
}, false, "csi-hostpath-driver", map[string]string{
|
||||
}, false, "csi-hostpath-driver", "kubernetes", map[string]string{
|
||||
"Attacher": "sig-storage/csi-attacher:v3.1.0@sha256:50c3cfd458fc8e0bf3c8c521eac39172009382fc66dc5044a330d137c6ed0b09",
|
||||
"HostMonitorAgent": "sig-storage/csi-external-health-monitor-agent:v0.2.0@sha256:c20d4a4772599e68944452edfcecc944a1df28c19e94b942d526ca25a522ea02",
|
||||
"HostMonitorController": "sig-storage/csi-external-health-monitor-controller:v0.2.0@sha256:14988b598a180cc0282f3f4bc982371baf9a9c9b80878fb385f8ae8bd04ecf16",
|
||||
|
@ -780,6 +788,7 @@ func GenerateTemplateData(addon *Addon, cfg config.KubernetesConfig, netInfo Net
|
|||
LoadBalancerStartIP string
|
||||
LoadBalancerEndIP string
|
||||
CustomIngressCert string
|
||||
ContainerRuntime string
|
||||
Images map[string]string
|
||||
Registries map[string]string
|
||||
CustomRegistries map[string]string
|
||||
|
@ -791,6 +800,7 @@ func GenerateTemplateData(addon *Addon, cfg config.KubernetesConfig, netInfo Net
|
|||
LoadBalancerStartIP: cfg.LoadBalancerStartIP,
|
||||
LoadBalancerEndIP: cfg.LoadBalancerEndIP,
|
||||
CustomIngressCert: cfg.CustomIngressCert,
|
||||
ContainerRuntime: cfg.ContainerRuntime,
|
||||
Images: images,
|
||||
Registries: addon.Registries,
|
||||
CustomRegistries: customRegistries,
|
||||
|
|
|
@ -18,6 +18,7 @@ package assets
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"embed"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io"
|
||||
|
@ -207,6 +208,7 @@ func NewMemoryAsset(d []byte, targetDir, targetName, permissions string) *Memory
|
|||
|
||||
// BinAsset is a bindata (binary data) asset
|
||||
type BinAsset struct {
|
||||
embed.FS
|
||||
BaseAsset
|
||||
reader io.ReadSeeker
|
||||
template *template.Template
|
||||
|
@ -214,8 +216,8 @@ type BinAsset struct {
|
|||
}
|
||||
|
||||
// MustBinAsset creates a new BinAsset, or panics if invalid
|
||||
func MustBinAsset(name, targetDir, targetName, permissions string) *BinAsset {
|
||||
asset, err := NewBinAsset(name, targetDir, targetName, permissions)
|
||||
func MustBinAsset(fs embed.FS, name, targetDir, targetName, permissions string) *BinAsset {
|
||||
asset, err := NewBinAsset(fs, name, targetDir, targetName, permissions)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to define asset %s: %v", name, err))
|
||||
}
|
||||
|
@ -223,8 +225,9 @@ func MustBinAsset(name, targetDir, targetName, permissions string) *BinAsset {
|
|||
}
|
||||
|
||||
// NewBinAsset creates a new BinAsset
|
||||
func NewBinAsset(name, targetDir, targetName, permissions string) (*BinAsset, error) {
|
||||
func NewBinAsset(fs embed.FS, name, targetDir, targetName, permissions string) (*BinAsset, error) {
|
||||
m := &BinAsset{
|
||||
FS: fs,
|
||||
BaseAsset: BaseAsset{
|
||||
SourcePath: name,
|
||||
TargetDir: targetDir,
|
||||
|
@ -249,7 +252,7 @@ func defaultValue(defValue string, val interface{}) string {
|
|||
}
|
||||
|
||||
func (m *BinAsset) loadData() error {
|
||||
contents, err := Asset(m.SourcePath)
|
||||
contents, err := m.FS.ReadFile(m.SourcePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -22,7 +22,7 @@ import (
|
|||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/blang/semver/v4"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
|
|
|
@ -22,7 +22,7 @@ import (
|
|||
"fmt"
|
||||
"path"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/blang/semver/v4"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/ktmpl"
|
||||
|
|
|
@ -143,6 +143,22 @@ func APIServerVersionMatch(client *kubernetes.Clientset, expected string) error
|
|||
return nil
|
||||
}
|
||||
|
||||
// WaitForAPIServerStatus waits for 'to' duration to get apiserver pod running or stopped
|
||||
// this functions is intended to use in situations where apiserver process can be recreated
|
||||
// by container runtime restart for example and there is a gap before it comes back
|
||||
func WaitForAPIServerStatus(cr command.Runner, to time.Duration, hostname string, port int) (state.State, error) {
|
||||
var st state.State
|
||||
err := wait.PollImmediate(200*time.Millisecond, to, func() (bool, error) {
|
||||
var err error
|
||||
st, err = APIServerStatus(cr, hostname, port)
|
||||
if st == state.Stopped {
|
||||
return false, nil
|
||||
}
|
||||
return true, err
|
||||
})
|
||||
return st, err
|
||||
}
|
||||
|
||||
// APIServerStatus returns apiserver status in libmachine style state.State
|
||||
func APIServerStatus(cr command.Runner, hostname string, port int) (state.State, error) {
|
||||
klog.Infof("Checking apiserver status ...")
|
||||
|
@ -207,7 +223,7 @@ func apiServerHealthz(hostname string, port int) (state.State, error) {
|
|||
return nil
|
||||
}
|
||||
|
||||
err = retry.Local(check, 5*time.Second)
|
||||
err = retry.Local(check, 15*time.Second)
|
||||
|
||||
// Don't propagate 'Stopped' upwards as an error message, as clients may interpret the err
|
||||
// as an inability to get status. We need it for retry.Local, however.
|
||||
|
@ -232,7 +248,7 @@ func apiServerHealthzNow(hostname string, port int) (state.State, error) {
|
|||
Proxy: nil, // Avoid using a proxy to speak to a local host
|
||||
TLSClientConfig: &tls.Config{RootCAs: pool},
|
||||
}
|
||||
client := &http.Client{Transport: tr}
|
||||
client := &http.Client{Transport: tr, Timeout: 5 * time.Second}
|
||||
resp, err := client.Get(url)
|
||||
// Connection refused, usually.
|
||||
if err != nil {
|
||||
|
|
|
@ -40,7 +40,7 @@ etcd:
|
|||
dataDir: /var/lib/minikube/etcd
|
||||
extraArgs:
|
||||
proxy-refresh-interval: "70000"
|
||||
kubernetesVersion: v1.22.0-alpha.2
|
||||
kubernetesVersion: v1.22.0-beta.0
|
||||
networking:
|
||||
dnsDomain: cluster.local
|
||||
podSubnet: "10.244.0.0/16"
|
||||
|
|
|
@ -40,7 +40,7 @@ etcd:
|
|||
dataDir: /var/lib/minikube/etcd
|
||||
extraArgs:
|
||||
proxy-refresh-interval: "70000"
|
||||
kubernetesVersion: v1.22.0-alpha.2
|
||||
kubernetesVersion: v1.22.0-beta.0
|
||||
networking:
|
||||
dnsDomain: cluster.local
|
||||
podSubnet: "192.168.32.0/20"
|
||||
|
|
|
@ -40,7 +40,7 @@ etcd:
|
|||
dataDir: /var/lib/minikube/etcd
|
||||
extraArgs:
|
||||
proxy-refresh-interval: "70000"
|
||||
kubernetesVersion: v1.22.0-alpha.2
|
||||
kubernetesVersion: v1.22.0-beta.0
|
||||
networking:
|
||||
dnsDomain: cluster.local
|
||||
podSubnet: "10.244.0.0/16"
|
||||
|
|
|
@ -46,7 +46,7 @@ etcd:
|
|||
dataDir: /var/lib/minikube/etcd
|
||||
extraArgs:
|
||||
proxy-refresh-interval: "70000"
|
||||
kubernetesVersion: v1.22.0-alpha.2
|
||||
kubernetesVersion: v1.22.0-beta.0
|
||||
networking:
|
||||
dnsDomain: cluster.local
|
||||
podSubnet: "10.244.0.0/16"
|
||||
|
|
|
@ -40,7 +40,7 @@ etcd:
|
|||
dataDir: /var/lib/minikube/etcd
|
||||
extraArgs:
|
||||
proxy-refresh-interval: "70000"
|
||||
kubernetesVersion: v1.22.0-alpha.2
|
||||
kubernetesVersion: v1.22.0-beta.0
|
||||
networking:
|
||||
dnsDomain: cluster.local
|
||||
podSubnet: "10.244.0.0/16"
|
||||
|
|
|
@ -40,7 +40,7 @@ etcd:
|
|||
dataDir: /var/lib/minikube/etcd
|
||||
extraArgs:
|
||||
proxy-refresh-interval: "70000"
|
||||
kubernetesVersion: v1.22.0-alpha.2
|
||||
kubernetesVersion: v1.22.0-beta.0
|
||||
networking:
|
||||
dnsDomain: cluster.local
|
||||
podSubnet: "10.244.0.0/16"
|
||||
|
|
|
@ -40,7 +40,7 @@ etcd:
|
|||
dataDir: /var/lib/minikube/etcd
|
||||
extraArgs:
|
||||
proxy-refresh-interval: "70000"
|
||||
kubernetesVersion: v1.22.0-alpha.2
|
||||
kubernetesVersion: v1.22.0-beta.0
|
||||
networking:
|
||||
dnsDomain: minikube.local
|
||||
podSubnet: "10.244.0.0/16"
|
||||
|
|
|
@ -41,7 +41,7 @@ etcd:
|
|||
dataDir: /var/lib/minikube/etcd
|
||||
extraArgs:
|
||||
proxy-refresh-interval: "70000"
|
||||
kubernetesVersion: v1.22.0-alpha.2
|
||||
kubernetesVersion: v1.22.0-beta.0
|
||||
networking:
|
||||
dnsDomain: cluster.local
|
||||
podSubnet: "10.244.0.0/16"
|
||||
|
|
|
@ -43,7 +43,7 @@ etcd:
|
|||
dataDir: /var/lib/minikube/etcd
|
||||
extraArgs:
|
||||
proxy-refresh-interval: "70000"
|
||||
kubernetesVersion: v1.22.0-alpha.2
|
||||
kubernetesVersion: v1.22.0-beta.0
|
||||
networking:
|
||||
dnsDomain: cluster.local
|
||||
podSubnet: "10.244.0.0/16"
|
||||
|
|
|
@ -20,7 +20,7 @@ import (
|
|||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/blang/semver/v4"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/vmpath"
|
||||
"k8s.io/minikube/pkg/util"
|
||||
|
|
|
@ -19,7 +19,7 @@ package bsutil
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/blang/semver/v4"
|
||||
)
|
||||
|
||||
func TestVersionIsBetween(t *testing.T) {
|
||||
|
|
|
@ -21,7 +21,7 @@ import (
|
|||
"fmt"
|
||||
"path"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/blang/semver/v4"
|
||||
|
||||
"k8s.io/minikube/pkg/version"
|
||||
)
|
||||
|
|
|
@ -20,7 +20,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/blang/semver/v4"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"k8s.io/minikube/pkg/version"
|
||||
)
|
||||
|
|
|
@ -20,7 +20,7 @@ import (
|
|||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/blang/semver/v4"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
|
|
|
@ -32,7 +32,7 @@ import (
|
|||
|
||||
// WARNING: Do not use path/filepath in this package unless you want bizarre Windows paths
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/blang/semver/v4"
|
||||
"github.com/docker/machine/libmachine"
|
||||
"github.com/docker/machine/libmachine/state"
|
||||
"github.com/pkg/errors"
|
||||
|
@ -568,13 +568,13 @@ func (k *Bootstrapper) needsReconfigure(conf string, hostname string, port int,
|
|||
klog.Infof("needs reconfigure: configs differ:\n%s", rr.Output())
|
||||
return true
|
||||
}
|
||||
|
||||
st, err := kverify.APIServerStatus(k.c, hostname, port)
|
||||
// cruntime.Enable() may restart kube-apiserver but does not wait for it to return back
|
||||
apiStatusTimeout := 3000 * time.Millisecond
|
||||
st, err := kverify.WaitForAPIServerStatus(k.c, apiStatusTimeout, hostname, port)
|
||||
if err != nil {
|
||||
klog.Infof("needs reconfigure: apiserver error: %v", err)
|
||||
return true
|
||||
}
|
||||
|
||||
if st != state.Running {
|
||||
klog.Infof("needs reconfigure: apiserver in state %s", st)
|
||||
return true
|
||||
|
|
|
@ -47,12 +47,10 @@ func pause(cr cruntime.Manager, r command.Runner, namespaces []string) ([]string
|
|||
|
||||
// Disable the kubelet so it does not attempt to restart paused pods
|
||||
sm := sysinit.New(r)
|
||||
if err := sm.Disable("kubelet"); err != nil {
|
||||
return ids, errors.Wrap(err, "kubelet disable")
|
||||
}
|
||||
klog.Info("kubelet running: ", sm.Active("kubelet"))
|
||||
|
||||
if err := sm.Stop("kubelet"); err != nil {
|
||||
return ids, errors.Wrap(err, "kubelet stop")
|
||||
if err := sm.DisableNow("kubelet"); err != nil {
|
||||
return ids, errors.Wrap(err, "kubelet disable --now")
|
||||
}
|
||||
|
||||
ids, err := cr.ListContainers(cruntime.ListContainersOptions{State: cruntime.Running, Namespaces: namespaces})
|
||||
|
|
|
@ -36,20 +36,10 @@ const (
|
|||
WantBetaUpdateNotification = "WantBetaUpdateNotification"
|
||||
// ReminderWaitPeriodInHours is the key for ReminderWaitPeriodInHours
|
||||
ReminderWaitPeriodInHours = "ReminderWaitPeriodInHours"
|
||||
// WantReportError is the key for WantReportError
|
||||
WantReportError = "WantReportError"
|
||||
// WantReportErrorPrompt is the key for WantReportErrorPrompt
|
||||
WantReportErrorPrompt = "WantReportErrorPrompt"
|
||||
// WantKubectlDownloadMsg is the key for WantKubectlDownloadMsg
|
||||
WantKubectlDownloadMsg = "WantKubectlDownloadMsg"
|
||||
// WantNoneDriverWarning is the key for WantNoneDriverWarning
|
||||
WantNoneDriverWarning = "WantNoneDriverWarning"
|
||||
// ProfileName represents the key for the global profile parameter
|
||||
ProfileName = "profile"
|
||||
// ShowDriverDeprecationNotification is the key for ShowDriverDeprecationNotification
|
||||
ShowDriverDeprecationNotification = "ShowDriverDeprecationNotification"
|
||||
// ShowBootstrapperDeprecationNotification is the key for ShowBootstrapperDeprecationNotification
|
||||
ShowBootstrapperDeprecationNotification = "ShowBootstrapperDeprecationNotification"
|
||||
// UserFlag is the key for the global user flag (ex. --user=user1)
|
||||
UserFlag = "user"
|
||||
// AddonImages stores custom addon images config
|
||||
|
@ -58,6 +48,8 @@ const (
|
|||
AddonRegistries = "addon-registries"
|
||||
// AddonListFlag represents the key for addons parameter
|
||||
AddonListFlag = "addons"
|
||||
// EmbedCerts represents the config for embedding certificates in kubeconfig
|
||||
EmbedCerts = "EmbedCerts"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
|
@ -20,7 +20,7 @@ import (
|
|||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/blang/semver/v4"
|
||||
)
|
||||
|
||||
// Profile represents a minikube profile
|
||||
|
|
|
@ -34,10 +34,10 @@ var (
|
|||
const (
|
||||
// DefaultKubernetesVersion is the default Kubernetes version
|
||||
// dont update till #10545 is solved
|
||||
DefaultKubernetesVersion = "v1.20.7"
|
||||
DefaultKubernetesVersion = "v1.20.8"
|
||||
// NewestKubernetesVersion is the newest Kubernetes version to test against
|
||||
// NOTE: You may need to update coreDNS & etcd versions in pkg/minikube/bootstrapper/images/images.go
|
||||
NewestKubernetesVersion = "v1.22.0-alpha.2"
|
||||
NewestKubernetesVersion = "v1.22.0-beta.0"
|
||||
// OldestKubernetesVersion is the oldest Kubernetes version to test against
|
||||
OldestKubernetesVersion = "v1.14.0"
|
||||
// DefaultClusterName is the default nane for the k8s cluster
|
||||
|
@ -114,6 +114,8 @@ const (
|
|||
|
||||
// TimeFormat is the format that should be used when outputting time
|
||||
TimeFormat = time.RFC1123
|
||||
// MaxResources is the value that can be passed into the memory and cpus flags to specify to use maximum resources
|
||||
MaxResources = "max"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
|
@ -29,7 +29,7 @@ import (
|
|||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/blang/semver/v4"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/minikube/pkg/minikube/assets"
|
||||
|
@ -49,7 +49,6 @@ const (
|
|||
containerdConfigTemplate = `root = "/var/lib/containerd"
|
||||
state = "/run/containerd"
|
||||
oom_score = 0
|
||||
|
||||
[grpc]
|
||||
address = "/run/containerd/containerd.sock"
|
||||
uid = 0
|
||||
|
@ -79,16 +78,21 @@ oom_score = 0
|
|||
enable_selinux = false
|
||||
sandbox_image = "{{ .PodInfraContainerImage }}"
|
||||
stats_collect_period = 10
|
||||
systemd_cgroup = {{ .SystemdCgroup }}
|
||||
enable_tls_streaming = false
|
||||
max_container_log_line_size = 16384
|
||||
|
||||
[plugins."io.containerd.grpc.v1.cri"]
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd]
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
|
||||
runtime_type = "io.containerd.runc.v2"
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
|
||||
SystemdCgroup = {{ .SystemdCgroup }}
|
||||
|
||||
[plugins.cri.containerd]
|
||||
snapshotter = "overlayfs"
|
||||
no_pivot = true
|
||||
[plugins.cri.containerd.default_runtime]
|
||||
runtime_type = "io.containerd.runtime.v1.linux"
|
||||
runtime_engine = ""
|
||||
runtime_root = ""
|
||||
runtime_type = "io.containerd.runc.v2"
|
||||
[plugins.cri.containerd.untrusted_workload_runtime]
|
||||
runtime_type = ""
|
||||
runtime_engine = ""
|
||||
|
@ -107,12 +111,6 @@ oom_score = 0
|
|||
{{ end -}}
|
||||
[plugins.diff-service]
|
||||
default = ["walking"]
|
||||
[plugins.linux]
|
||||
shim = "containerd-shim"
|
||||
runtime = "runc"
|
||||
runtime_root = ""
|
||||
no_shim = false
|
||||
shim_debug = false
|
||||
[plugins.scheduler]
|
||||
pause_threshold = 0.02
|
||||
deletion_threshold = 0
|
||||
|
|
|
@ -25,7 +25,7 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/blang/semver/v4"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/minikube/pkg/minikube/assets"
|
||||
|
|
|
@ -21,7 +21,7 @@ import (
|
|||
"fmt"
|
||||
"os/exec"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/blang/semver/v4"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/minikube/pkg/minikube/assets"
|
||||
|
@ -163,6 +163,30 @@ type ListImagesOptions struct {
|
|||
// ErrContainerRuntimeNotRunning is thrown when container runtime is not running
|
||||
var ErrContainerRuntimeNotRunning = errors.New("container runtime is not running")
|
||||
|
||||
// ErrServiceVersion is the error returned when disk image has incompatible version of service
|
||||
type ErrServiceVersion struct {
|
||||
// Service is the name of the incompatible service
|
||||
Service string
|
||||
// Installed is the installed version of Service
|
||||
Installed string
|
||||
// Required is the minimum required version of Service
|
||||
Required string
|
||||
}
|
||||
|
||||
// NewErrServiceVersion creates a new ErrServiceVersion
|
||||
func NewErrServiceVersion(svc, required, installed string) *ErrServiceVersion {
|
||||
return &ErrServiceVersion{
|
||||
Service: svc,
|
||||
Installed: installed,
|
||||
Required: required,
|
||||
}
|
||||
}
|
||||
|
||||
func (e ErrServiceVersion) Error() string {
|
||||
return fmt.Sprintf("service %q version is %v. Required: %v",
|
||||
e.Service, e.Installed, e.Required)
|
||||
}
|
||||
|
||||
// New returns an appropriately configured runtime
|
||||
func New(c Config) (Manager, error) {
|
||||
sm := sysinit.New(c.Runner)
|
||||
|
@ -243,3 +267,29 @@ func disableOthers(me Manager, cr CommandRunner) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var requiredContainerdVersion = semver.MustParse("1.4.0")
|
||||
|
||||
// compatibleWithVersion checks if current version of "runtime" is compatible with version "v"
|
||||
func compatibleWithVersion(runtime, v string) error {
|
||||
if runtime == "containerd" {
|
||||
vv, err := semver.Make(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if requiredContainerdVersion.GT(vv) {
|
||||
return NewErrServiceVersion(runtime, requiredContainerdVersion.String(), vv.String())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckCompatibility checks if the container runtime managed by "cr" is compatible with current minikube code
|
||||
// returns: NewErrServiceVersion if not
|
||||
func CheckCompatibility(cr Manager) error {
|
||||
v, err := cr.Version()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed to check container runtime version")
|
||||
}
|
||||
return compatibleWithVersion(cr.Name(), v)
|
||||
}
|
||||
|
|
|
@ -24,7 +24,7 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/blang/semver/v4"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/minikube/pkg/minikube/assets"
|
||||
|
|
|
@ -77,6 +77,15 @@ func IsAmd64M1Emulation() bool {
|
|||
return runtime.GOARCH == "amd64" && strings.HasPrefix(cpuid.CPU.BrandName, "VirtualApple")
|
||||
}
|
||||
|
||||
// EffectiveArch return architecture to use in minikube VM/container
|
||||
// may differ from host arch
|
||||
func EffectiveArch() string {
|
||||
if IsAmd64M1Emulation() {
|
||||
return "arm64"
|
||||
}
|
||||
return runtime.GOARCH
|
||||
}
|
||||
|
||||
// MinikubeInstalledViaSnap returns true if the minikube binary path includes "snap".
|
||||
func MinikubeInstalledViaSnap() bool {
|
||||
ex, err := os.Executable()
|
||||
|
|
|
@ -22,7 +22,9 @@ import (
|
|||
"path"
|
||||
"runtime"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"k8s.io/minikube/pkg/minikube/detect"
|
||||
|
||||
"github.com/blang/semver/v4"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/minikube/pkg/minikube/localpath"
|
||||
|
@ -70,7 +72,7 @@ func Binary(binary, version, osName, archName string) (string, error) {
|
|||
return "", errors.Wrapf(err, "download failed: %s", url)
|
||||
}
|
||||
|
||||
if osName == runtime.GOOS && archName == runtime.GOARCH {
|
||||
if osName == runtime.GOOS && archName == detect.EffectiveArch() {
|
||||
if err = os.Chmod(targetFilepath, 0755); err != nil {
|
||||
return "", errors.Wrapf(err, "chmod +x %s", targetFilepath)
|
||||
}
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue