Merge master

pull/7247/head
Thomas Stromberg 2020-04-23 14:31:47 -07:00
commit e5cc661733
83 changed files with 339 additions and 93 deletions

View File

@ -1,5 +1,27 @@
# Release Notes
## Version 1.10.0-beta.1 - 2020-04-22
Improvements:
* Skip preload download if --image-repository is set [#7707](https://github.com/kubernetes/minikube/pull/7707)
Bug Fixes:
* ISO: persistently mount /var/lib/containerd [#7843](https://github.com/kubernetes/minikube/pull/7843)
* docker/podman: fix delete -p not cleaning up & add integration test [#7819](https://github.com/kubernetes/minikube/pull/7819)
Huge thank you for this release towards our contributors:
- Anders F Björklund
- Kenta Iso
- Medya Ghazizadeh
- Prasad Katti
- Priya Wadhwa
- Sharif Elgamal
- Thomas Stromberg
- Tobias Klauser
## Version 1.10.0-beta.0 - 2020-04-20
Improvements:

View File

@ -15,7 +15,7 @@
# Bump these on release - and please check ISO_VERSION for correctness.
VERSION_MAJOR ?= 1
VERSION_MINOR ?= 10
VERSION_BUILD ?= 0-beta.0
VERSION_BUILD ?= 0-beta.1
RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)
VERSION ?= v$(RAW_VERSION)
@ -255,7 +255,7 @@ docker-machine-driver-hyperkit: out/docker-machine-driver-hyperkit ## Build Hype
docker-machine-driver-kvm2: out/docker-machine-driver-kvm2 ## Build KVM2 driver
.PHONY: integration
integration: out/minikube ## Trigger minikube integration test
integration: out/minikube$(IS_EXE) ## Trigger minikube integration test
go test -v -test.timeout=60m ./test/integration --tags="$(MINIKUBE_INTEGRATION_BUILD_TAGS)" $(TEST_ARGS)
.PHONY: integration-none-driver
@ -397,6 +397,10 @@ reportcard: ## Run goreportcard for minikube
mdlint:
@$(MARKDOWNLINT) $(MINIKUBE_MARKDOWN_FILES)
.PHONY: verify-iso
verify-iso: # Make sure the current ISO exists in the expected bucket
gsutil stat gs://$(ISO_BUCKET)/minikube-$(ISO_VERSION).iso
out/docs/minikube.md: $(shell find "cmd") $(shell find "pkg/minikube/constants") pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go
go run -ldflags="$(MINIKUBE_LDFLAGS)" -tags gendocs hack/help_text/gen_help_text.go

View File

@ -75,7 +75,6 @@ var nodeStartCmd = &cobra.Command{
}
func init() {
nodeStartCmd.Flags().String("name", "", "The name of the node to start")
nodeStartCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
nodeCmd.AddCommand(nodeStartCmd)
}

View File

@ -53,6 +53,5 @@ var nodeStopCmd = &cobra.Command{
}
func init() {
nodeStopCmd.Flags().String("name", "", "The name of the node to delete")
nodeCmd.AddCommand(nodeStopCmd)
}

View File

@ -60,7 +60,6 @@ import (
"k8s.io/minikube/pkg/minikube/registry"
"k8s.io/minikube/pkg/minikube/translate"
"k8s.io/minikube/pkg/util"
pkgutil "k8s.io/minikube/pkg/util"
"k8s.io/minikube/pkg/version"
)
@ -751,7 +750,7 @@ func suggestMemoryAllocation(sysLimit int, containerLimit int) int {
// validateMemorySize validates the memory size matches the minimum recommended
func validateMemorySize() {
req, err := pkgutil.CalculateSizeInMB(viper.GetString(memory))
req, err := util.CalculateSizeInMB(viper.GetString(memory))
if err != nil {
exit.WithCodeT(exit.Config, "Unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
}
@ -787,7 +786,7 @@ func validateCPUCount(local bool) {
// validateFlags validates the supplied flags against known bad combinations
func validateFlags(cmd *cobra.Command, drvName string) {
if cmd.Flags().Changed(humanReadableDiskSize) {
diskSizeMB, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
diskSizeMB, err := util.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
if err != nil {
exit.WithCodeT(exit.Config, "Validation unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err})
}

View File

@ -26,6 +26,7 @@ import (
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/drivers/kic"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
"k8s.io/minikube/pkg/minikube/config"
@ -98,6 +99,7 @@ const (
nodes = "nodes"
preload = "preload"
deleteOnFailure = "delete-on-failure"
kicBaseImage = "base-image"
)
// initMinikubeFlags includes commandline flags for minikube.
@ -118,6 +120,7 @@ func initMinikubeFlags() {
startCmd.Flags().Bool(downloadOnly, false, "If true, only download and cache files for later use - don't install or start anything.")
startCmd.Flags().Bool(cacheImages, true, "If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none.")
startCmd.Flags().StringSlice(isoURL, download.DefaultISOURLs(), "Locations to fetch the minikube ISO from.")
startCmd.Flags().String(kicBaseImage, kic.BaseImage, "The base image to use for docker/podman drivers. Intended for local development.")
startCmd.Flags().Bool(keepContext, false, "This will keep the existing kubectl context and will create a minikube context.")
startCmd.Flags().Bool(embedCerts, false, "if true, will embed the certs in kubeconfig.")
startCmd.Flags().String(containerRuntime, "docker", "The container runtime to be used (docker, crio, containerd).")

View File

@ -104,12 +104,11 @@ var statusCmd = &cobra.Command{
cname := ClusterFlagValue()
api, cc := mustload.Partial(cname)
var st *Status
var err error
var statuses []*Status
for _, n := range cc.Nodes {
glog.Infof("checking status of %s ...", n.Name)
machineName := driver.MachineName(*cc, n)
st, err = status(api, *cc, n)
st, err := status(api, *cc, n)
glog.Infof("%s status: %+v", machineName, st)
if err != nil {
@ -118,36 +117,40 @@ var statusCmd = &cobra.Command{
if st.Host == Nonexistent {
glog.Errorf("The %q host does not exist!", machineName)
}
statuses = append(statuses, st)
}
switch strings.ToLower(output) {
case "text":
switch strings.ToLower(output) {
case "text":
for _, st := range statuses {
if err := statusText(st, os.Stdout); err != nil {
exit.WithError("status text failure", err)
}
case "json":
if err := statusJSON(st, os.Stdout); err != nil {
exit.WithError("status json failure", err)
}
default:
exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output))
}
case "json":
if err := statusJSON(statuses, os.Stdout); err != nil {
exit.WithError("status json failure", err)
}
default:
exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output))
}
// TODO: Update for multi-node
os.Exit(exitCode(st))
os.Exit(exitCode(statuses))
},
}
func exitCode(st *Status) int {
func exitCode(statuses []*Status) int {
c := 0
if st.Host != state.Running.String() {
c |= minikubeNotRunningStatusFlag
}
if (st.APIServer != state.Running.String() && st.APIServer != Irrelevant) || st.Kubelet != state.Running.String() {
c |= clusterNotRunningStatusFlag
}
if st.Kubeconfig != Configured && st.Kubeconfig != Irrelevant {
c |= k8sNotRunningStatusFlag
for _, st := range statuses {
if st.Host != state.Running.String() {
c |= minikubeNotRunningStatusFlag
}
if (st.APIServer != state.Running.String() && st.APIServer != Irrelevant) || st.Kubelet != state.Running.String() {
c |= clusterNotRunningStatusFlag
}
if st.Kubeconfig != Configured && st.Kubeconfig != Irrelevant {
c |= k8sNotRunningStatusFlag
}
}
return c
}
@ -270,8 +273,15 @@ func statusText(st *Status, w io.Writer) error {
return nil
}
func statusJSON(st *Status, w io.Writer) error {
js, err := json.Marshal(st)
func statusJSON(st []*Status, w io.Writer) error {
var js []byte
var err error
// Keep backwards compat with single node clusters to not break anyone
if len(st) == 1 {
js, err = json.Marshal(st[0])
} else {
js, err = json.Marshal(st)
}
if err != nil {
return err
}

View File

@ -35,7 +35,7 @@ func TestExitCode(t *testing.T) {
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
got := exitCode(tc.state)
got := exitCode([]*Status{tc.state})
if got != tc.want {
t.Errorf("exitcode(%+v) = %d, want: %d", tc.state, got, tc.want)
}
@ -93,7 +93,7 @@ func TestStatusJSON(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
var b bytes.Buffer
err := statusJSON(tc.state, &b)
err := statusJSON([]*Status{tc.state}, &b)
if err != nil {
t.Errorf("json(%+v) error: %v", tc.state, err)
}

View File

@ -101,6 +101,10 @@ if [ -n "$BOOT2DOCKER_DATA" ]; then
mkdir -p /var/lib/docker
mount --bind /mnt/$PARTNAME/var/lib/docker /var/lib/docker
mkdir -p /mnt/$PARTNAME/var/lib/containerd
mkdir -p /var/lib/containerd
mount --bind /mnt/$PARTNAME/var/lib/containerd /var/lib/containerd
mkdir -p /mnt/$PARTNAME/var/lib/containers
mkdir -p /var/lib/containers
mount --bind /mnt/$PARTNAME/var/lib/containers /var/lib/containers

View File

@ -30,7 +30,7 @@ endef
define PODMAN_INSTALL_TARGET_CMDS
$(INSTALL) -Dm755 $(@D)/bin/podman $(TARGET_DIR)/usr/bin/podman
$(INSTALL) -d -m 755 $(TARGET_DIR)/etc/cni/net.d/
$(INSTALL) -m 644 cni/87-podman-bridge.conflist $(TARGET_DIR)/etc/cni/net.d/87-podman-bridge.conflist
$(INSTALL) -m 644 $(@D)/cni/87-podman-bridge.conflist $(TARGET_DIR)/etc/cni/net.d/87-podman-bridge.conflist
endef
$(eval $(generic-package))

View File

@ -27,7 +27,7 @@ set -e
OS_ARCH="linux-amd64"
VM_DRIVER="podman"
JOB_NAME="Podman_Linux"
JOB_NAME="Experimental_Podman_Linux"
mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP"

View File

@ -38,6 +38,10 @@ grep -E "^VERSION_BUILD \\?=" Makefile | grep "${VERSION_BUILD}"
# Force go packages to the Jekins home directory
export GOPATH=$HOME/go
# Verify ISO exists
echo "Verifying ISO exists ..."
make verify-iso
# Build and upload
env BUILD_IN_DOCKER=y \
make -j 16 \

View File

@ -75,34 +75,13 @@ github-release -v release ${RELEASE_FLAGS} \
--name "${TAGNAME}" \
--description "${DESCRIPTION}"
# Uploading the files into github
FILES_TO_UPLOAD=(
'minikube-linux-amd64'
'minikube-linux-amd64.sha256'
'minikube-darwin-amd64'
'minikube-darwin-amd64.sha256'
'minikube-windows-amd64.exe'
'minikube-windows-amd64.exe.sha256'
'minikube-installer.exe'
"minikube_${DEB_VERSION}-0_amd64.deb"
"minikube-${RPM_VERSION}-0.x86_64.rpm"
'docker-machine-driver-kvm2'
'docker-machine-driver-kvm2.sha256'
'docker-machine-driver-hyperkit'
'docker-machine-driver-hyperkit.sha256'
)
# ISO files are special, as they are generated pre-release tagging
ISO_FILES=("minikube-v${VERSION}.iso" "minikube-v${VERSION}.iso.sha256")
for DOWNLOAD in "${ISO_FILES[@]}"
do
gsutil cp "gs://${ISO_BUCKET}/${DOWNLOAD}" out/ \
&& FILES_TO_UPLOAD+=("${DOWNLOAD}") \
|| echo "${DOWNLOAD} was not generated for this release"
# ISO files are built from a separate process, and may not be included in this release
for path in $(gsutil ls "gs://${ISO_BUCKET}/minikube-v${VERSION}*" || true); do
gsutil cp "${path}" out/
done
for UPLOAD in "${FILES_TO_UPLOAD[@]}"
do
# Upload all end-user assets other than preload files, as they are release independent
for file in out/minikube[_-]* out/docker-machine-*; do
n=0
until [ $n -ge 5 ]
do
@ -110,8 +89,8 @@ do
--user "${GITHUB_ORGANIZATION}" \
--repo "${GITHUB_REPO}" \
--tag "${TAGNAME}" \
--name "$UPLOAD" \
--file "out/$UPLOAD" && break
--name "$(basename ${file})" \
--file "${file}" && break
n=$((n+1))
sleep 15
done

View File

@ -31,7 +31,6 @@ import (
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/sysinit"
"k8s.io/minikube/pkg/util"
@ -41,7 +40,7 @@ import (
func generateTarball(kubernetesVersion, containerRuntime, tarballFilename string) error {
driver := kic.NewDriver(kic.Config{
KubernetesVersion: kubernetesVersion,
ContainerRuntime: driver.Docker,
ContainerRuntime: containerRuntime,
OCIBinary: oci.Docker,
MachineName: profile,
ImageDigest: kic.BaseImage,

View File

@ -50,6 +50,8 @@ var componentToKubeadmConfigKey = map[string]string{
ControllerManager: "controllerManager",
Scheduler: "scheduler",
Kubeadm: "kubeadm",
// The KubeProxy is handled in different config block
Kubeproxy: "",
// The Kubelet is not configured in kubeadm, only in systemd.
Kubelet: "",
}
@ -178,6 +180,9 @@ func optionPairsForComponent(component string, version semver.Version, cp config
return nil
}
// kubeadm extra args should not be included in the kubeadm config in the extra args section (instead, they must
// be inserted explicitly in the appropriate places or supplied from the command line); here we remove all of the
// kubeadm extra args from the slice
// createExtraComponentConfig generates a map of component to extra args for all of the components except kubeadm
func createExtraComponentConfig(extraOptions config.ExtraOptionSlice, version semver.Version, componentFeatureArgs string, cp config.Node) ([]componentOptions, error) {
extraArgsSlice, err := newComponentOptions(extraOptions, version, componentFeatureArgs, cp)
@ -185,9 +190,6 @@ func createExtraComponentConfig(extraOptions config.ExtraOptionSlice, version se
return nil, err
}
// kubeadm extra args should not be included in the kubeadm config in the extra args section (instead, they must
// be inserted explicitly in the appropriate places or supplied from the command line); here we remove all of the
// kubeadm extra args from the slice
for i, extraArgs := range extraArgsSlice {
if extraArgs.Component == Kubeadm {
extraArgsSlice = append(extraArgsSlice[:i], extraArgsSlice[i+1:]...)
@ -197,6 +199,12 @@ func createExtraComponentConfig(extraOptions config.ExtraOptionSlice, version se
return extraArgsSlice, nil
}
// createKubeProxyOptions generates a map of extra config for kube-proxy
func createKubeProxyOptions(extraOptions config.ExtraOptionSlice) map[string]string {
kubeProxyOptions := extraOptions.AsMap().Get(Kubeproxy)
return kubeProxyOptions
}
func convertToFlags(opts map[string]string) string {
var flags []string
var keys []string

View File

@ -82,5 +82,9 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "{{.PodSubnet }}"
metricsBindAddress: {{.AdvertiseAddress}}:10249
{{- range $i, $val := printMapInOrder .KubeProxyOptions ": " }}
{{$val}}
{{- end}}
`))

View File

@ -80,5 +80,9 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "{{.PodSubnet }}"
metricsBindAddress: {{.AdvertiseAddress}}:10249
{{- range $i, $val := printMapInOrder .KubeProxyOptions ": " }}
{{$val}}
{{- end}}
`))

View File

@ -83,6 +83,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana
NoTaintMaster bool
NodeIP string
ControlPlaneAddress string
KubeProxyOptions map[string]string
}{
CertDir: vmpath.GuestKubernetesCertsDir,
ServiceCIDR: constants.DefaultServiceCIDR,
@ -102,6 +103,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana
DNSDomain: k8s.DNSDomain,
NodeIP: n.IP,
ControlPlaneAddress: constants.ControlPlaneAlias,
KubeProxyOptions: createKubeProxyOptions(k8s.ExtraOptions),
}
if k8s.ServiceCIDR != "" {
@ -135,6 +137,7 @@ const (
Apiserver = "apiserver"
Scheduler = "scheduler"
ControllerManager = "controller-manager"
Kubeproxy = "kube-proxy"
)
// InvokeKubeadm returns the invocation command for Kubeadm

View File

@ -55,6 +55,11 @@ func getExtraOpts() []config.ExtraOption {
Key: "dry-run",
Value: "true",
},
config.ExtraOption{
Component: Kubeproxy,
Key: "mode",
Value: "iptables",
},
}
}

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -60,4 +60,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -51,4 +51,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -57,4 +57,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -60,4 +60,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -51,4 +51,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -57,4 +57,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -60,4 +60,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -50,4 +50,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -51,4 +51,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -57,4 +57,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -58,4 +58,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -49,4 +49,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -55,4 +55,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -58,4 +58,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -49,4 +49,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -55,4 +55,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -58,4 +58,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -48,4 +48,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -49,4 +49,5 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249

View File

@ -55,4 +55,6 @@ evictionHard:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -25,7 +25,6 @@ import (
"github.com/spf13/viper"
"golang.org/x/sync/errgroup"
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/drivers/kic"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/download"
@ -102,11 +101,12 @@ func doCacheBinaries(k8sVersion string) error {
// BeginDownloadKicArtifacts downloads the kic image + preload tarball, returns true if preload is available
func beginDownloadKicArtifacts(g *errgroup.Group) {
glog.Info("Beginning downloading kic artifacts")
if !image.ExistsImageInDaemon(kic.BaseImage) {
baseImage := viper.GetString("base-image")
if !image.ExistsImageInDaemon(baseImage) {
out.T(out.Pulling, "Pulling base image ...")
g.Go(func() error {
glog.Infof("Downloading %s to local daemon", kic.BaseImage)
return image.WriteImageToDaemon(kic.BaseImage)
glog.Infof("Downloading %s to local daemon", baseImage)
return image.WriteImageToDaemon(baseImage)
})
}
}

View File

@ -26,6 +26,7 @@ import (
"github.com/docker/machine/libmachine/drivers"
"github.com/golang/glog"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/drivers/kic"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/minikube/config"
@ -59,7 +60,7 @@ func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
return kic.NewDriver(kic.Config{
MachineName: driver.MachineName(cc, n),
StorePath: localpath.MiniPath(),
ImageDigest: kic.BaseImage,
ImageDigest: viper.GetString("base-image"),
CPU: cc.CPUs,
Memory: cc.Memory,
OCIBinary: oci.Docker,

View File

@ -89,9 +89,9 @@ func status() registry.State {
ctx, cancel := context.WithTimeout(context.Background(), 8*time.Second)
defer cancel()
cmd := exec.CommandContext(ctx, path, "Get-WindowsOptionalFeature", "-FeatureName", "Microsoft-Hyper-V-All", "-Online")
cmd := exec.CommandContext(ctx, path, "@(Get-Wmiobject Win32_ComputerSystem).HypervisorPresent")
out, err := cmd.CombinedOutput()
if err != nil {
if string(out) != "True\r\n" {
errorMessage := fmt.Errorf("%s failed:\n%s", strings.Join(cmd.Args, " "), out)
fixMessage := "Start PowerShell as Administrator, and run: 'Enable-WindowsOptionalFeature -Online -FeatureName Microsoft-Hyper-V -All'"

View File

@ -26,6 +26,7 @@ import (
"github.com/blang/semver"
"github.com/docker/machine/libmachine/drivers"
"github.com/golang/glog"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/drivers/kic"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/minikube/config"
@ -50,10 +51,11 @@ func init() {
}
func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
baseImage := viper.GetString("base-image")
return kic.NewDriver(kic.Config{
MachineName: driver.MachineName(cc, n),
StorePath: localpath.MiniPath(),
ImageDigest: strings.Split(kic.BaseImage, "@")[0], // for podman does not support docker images references with both a tag and digest.
ImageDigest: strings.Split(baseImage, "@")[0], // for podman does not support docker images references with both a tag and digest.
CPU: cc.CPUs,
Memory: cc.Memory,
OCIBinary: oci.Podman,

View File

@ -155,7 +155,6 @@ minikube node start [flags]
```
--delete-on-failure If set, delete the current cluster if start fails and try again. Defaults to false.
-h, --help help for start
--name string The name of the node to start
```
### Options inherited from parent commands
@ -187,8 +186,7 @@ minikube node stop [flags]
### Options
```
-h, --help help for stop
--name string The name of the node to delete
-h, --help help for stop
```
### Options inherited from parent commands

View File

@ -27,6 +27,7 @@ minikube start [flags]
--apiserver-names stringArray A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine
--apiserver-port int The apiserver listening port (default 8443)
--auto-update-drivers If set, automatically updates drivers to the latest version. Defaults to true. (default true)
--base-image string The base image to use for docker/podman drivers. Intended for local development. (default "gcr.io/k8s-minikube/kicbase:v0.0.9@sha256:82a826cc03c3e59ead5969b8020ca138de98f366c1907293df91fc57205dbb53")
--cache-images If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none. (default true)
--container-runtime string The container runtime to be used (docker, crio, containerd). (default "docker")
--cpus int Number of CPUs allocated to Kubernetes. (default 2)
@ -63,7 +64,7 @@ minikube start [flags]
--insecure-registry strings Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.
--install-addons If set, install addons. Defaults to true. (default true)
--interactive Allow user prompts for more information (default true)
--iso-url strings Locations to fetch the minikube ISO from. (default [https://storage.googleapis.com/minikube/iso/minikube-v1.10.0-beta.0.iso,https://github.com/kubernetes/minikube/releases/download/v1.10.0-beta.0/minikube-v1.10.0-beta.0.iso,https://kubernetes.oss-cn-hangzhou.aliyuncs.com/minikube/iso/minikube-v1.10.0-beta.0.iso])
--iso-url strings Locations to fetch the minikube ISO from. (default [https://storage.googleapis.com/minikube/iso/minikube-v1.10.0-beta.1.iso,https://github.com/kubernetes/minikube/releases/download/v1.10.0-beta.1/minikube-v1.10.0-beta.1.iso,https://kubernetes.oss-cn-hangzhou.aliyuncs.com/minikube/iso/minikube-v1.10.0-beta.1.iso])
--keep-context This will keep the existing kubectl context and will create a minikube context.
--kubernetes-version string The kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.18.0, 'latest' for v1.18.0). Defaults to 'stable'.
--kvm-gpu Enable experimental NVIDIA GPU support in minikube

View File

@ -6,7 +6,7 @@ aliases:
---
## Overview
Hyper-V is a native hypervisor built in to modern versions of Microsoft Windows.
[Hyper-V](https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/) is a native hypervisor built in to modern versions of Microsoft Windows.
{{% readfile file="/docs/drivers/includes/hyperv_usage.inc" %}}
@ -14,7 +14,9 @@ Hyper-V is a native hypervisor built in to modern versions of Microsoft Windows.
The `minikube start` command supports additional hyperv specific flags:
* **`--hyperv-virtual-switch`**: The hyperv virtual switch name. Defaults to first found
* **`--hyperv-virtual-switch`**: Name of the virtual switch the minikube VM should use. Defaults to first found
* **`--hyperv-use-external-switch`**: Use external virtual switch over Default Switch if virtual switch not explicitly specified, creates a new one if not found. If the adapter is not specified, the driver first looks up LAN adapters before other adapters (WiFi, ...). Or the user may specify an adapter to attach to the external switch. Default false
* **`--hyperv-external-adapter`**: External adapter on which the new external switch is created if no existing external switch is found. Since Windows 10 only allows one external switch for the same adapter, it finds the virtual switch before creating one. The external switch is created and named "minikube"
## Issues

View File

@ -1,6 +1,6 @@
## Requirements
* Windows 10 Enterprise, Pro, or Education ([system requirements](https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/hyper-v-requirements))
* 64-bit versions of Windows 10 Enterprise, Pro, or Education ([system requirements](https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/hyper-v-requirements))
* Hyper-V enabled
## Enabling Hyper-V

View File

@ -639,7 +639,7 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) {
// validateServiceCmd asserts basic "service" command functionality
func validateServiceCmd(ctx context.Context, t *testing.T, profile string) {
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node", "--image=gcr.io/hello-minikube-zero-install/hello-node"))
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node", "--image=k8s.gcr.io/echoserver:1.4"))
if err != nil {
t.Logf("%q failed: %v (may not be an error).", rr.Command(), err)
}

View File

@ -29,24 +29,154 @@ func TestMultiNode(t *testing.T) {
// Multinode is unsupported on the none driver, which shares the host directly.
if NoneDriver() {
t.Skip("none driver does not support multinode")
}
MaybeParallel(t)
// validatorFunc is the common signature shared by every sub-test below.
type validatorFunc func(context.Context, *testing.T, string)
profile := UniqueProfileName("multinode")
// Bound the whole multinode suite to 30 minutes and always collect logs on cleanup.
ctx, cancel := context.WithTimeout(context.Background(), Minutes(30))
defer CleanupWithLogs(t, profile, cancel)
// NOTE(review): startArgs appears unused in this function (each validator builds
// its own args) — looks like merge/diff residue; verify against the merged file.
startArgs := append([]string{"start", "-p", profile, "--wait=true"}, StartArgs()...)
// The sub-tests mutate the same cluster, so they must run serially and in order:
// start 2 nodes, add a third, stop one, restart it, then delete it.
t.Run("serial", func(t *testing.T) {
tests := []struct {
name string
validator validatorFunc
}{
{"FreshStart2Nodes", validateMultiNodeStart},
{"AddNode", validateAddNodeToMultiNode},
{"StopNode", validateStopRunningNode},
{"StartAfterStop", validateStartNodeAfterStop},
{"DeleteNode", validateDeleteNodeFromMultiNode},
}
for _, tc := range tests {
// Capture the loop variable so the closure sees the right test case.
tc := tc
t.Run(tc.name, func(t *testing.T) {
tc.validator(ctx, t, profile)
})
}
})
}
// validateMultiNodeStart starts a fresh 2-node cluster via the --nodes flag and
// verifies that `minikube status` reports both hosts and both kubelets running.
//
// Note: the previous version also ran `node add` here (bringing the cluster to
// 3 nodes) while still asserting exactly 2 running nodes, which was
// self-contradictory and duplicated validateAddNodeToMultiNode, the very next
// step in the serial sequence. The add-node step has been removed.
func validateMultiNodeStart(ctx context.Context, t *testing.T, profile string) {
	// Start a 2 node cluster with the --nodes param
	startArgs := append([]string{"start", "-p", profile, "--wait=true", "--nodes=2"}, StartArgs()...)
	rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
	if err != nil {
		t.Fatalf("failed to start cluster. args %q : %v", rr.Command(), err)
	}

	// Make sure minikube status shows 2 nodes
	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
	if err != nil {
		t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
	}

	if strings.Count(rr.Stdout.String(), "host: Running") != 2 {
		t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String())
	}

	if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 {
		t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String())
	}
}
// validateAddNodeToMultiNode adds a third node to the running cluster and then
// confirms via `minikube status` that all three hosts and kubelets are running.
func validateAddNodeToMultiNode(ctx context.Context, t *testing.T, profile string) {
	// Add a node to the current cluster
	res, err := Run(t, exec.CommandContext(ctx, Target(), "node", "add", "-p", profile, "-v", "3", "--alsologtostderr"))
	if err != nil {
		t.Fatalf("failed to add node to current cluster. args %q : %v", res.Command(), err)
	}

	// Make sure minikube status shows 3 nodes
	res, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
	if err != nil {
		t.Fatalf("failed to run minikube status. args %q : %v", res.Command(), err)
	}

	out := res.Stdout.String()
	if got := strings.Count(out, "host: Running"); got != 3 {
		t.Errorf("status says all hosts are not running: args %q: %v", res.Command(), out)
	}
	if got := strings.Count(out, "kubelet: Running"); got != 3 {
		t.Errorf("status says all kubelets are not running: args %q: %v", res.Command(), out)
	}
}
// validateStopRunningNode stops the third node (m03) and checks that
// `minikube status` reports 2 running nodes plus 1 stopped host/kubelet.
//
// Fix: the previous version invoked `minikube status` twice back-to-back with
// the first result discarded; the redundant call has been removed.
func validateStopRunningNode(ctx context.Context, t *testing.T, profile string) {
	// Names are autogenerated using the node.Name() function
	name := "m03"

	// Run minikube node stop on that node
	rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "stop", name))
	if err != nil {
		t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err)
	}

	// Run status to see the stopped host.
	// Exit code 7 means one host is stopped, which we are expecting.
	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
	if err != nil && rr.ExitCode != 7 {
		t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
	}

	// Make sure minikube status shows 2 running nodes and 1 stopped one
	if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 {
		t.Errorf("incorrect number of running kubelets: args %q: %v", rr.Command(), rr.Stdout.String())
	}

	if strings.Count(rr.Stdout.String(), "host: Stopped") != 1 {
		t.Errorf("incorrect number of stopped hosts: args %q: %v", rr.Command(), rr.Stdout.String())
	}

	if strings.Count(rr.Stdout.String(), "kubelet: Stopped") != 1 {
		t.Errorf("incorrect number of stopped kubelets: args %q: %v", rr.Command(), rr.Stdout.String())
	}
}
// validateStartNodeAfterStop restarts the previously stopped node (m03) and
// verifies `minikube status` reports all 3 hosts and kubelets running again.
//
// Fix: the error messages said "both" while the assertions check for 3 nodes;
// reworded to "all" for correctness and consistency with
// validateAddNodeToMultiNode.
func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile string) {
	// TODO (#7496): remove skip once restarts work
	t.Skip("Restarting nodes is broken :(")

	// Grab the stopped node
	name := "m03"

	// Start the node back up
	rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", name))
	if err != nil {
		t.Errorf("node start returned an error. args %q: %v", rr.Command(), err)
	}

	// Make sure minikube status shows 3 running hosts
	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
	if err != nil {
		t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
	}

	if strings.Count(rr.Stdout.String(), "host: Running") != 3 {
		t.Errorf("status says all hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String())
	}

	if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 {
		t.Errorf("status says all kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String())
	}
}
func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile string) {
name := "m03"
// Start the node back up
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "delete", name))
if err != nil {
t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err)
}
// Make sure status is back down to 2 hosts
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
if err != nil {
t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)