From 9e95435e0020eed065ee0229a6a54a7e54530a6d Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Tue, 23 Jun 2020 20:10:25 -0700 Subject: [PATCH 01/12] Add --cni flag, fix --network-plugin=kubenet --- cmd/minikube/cmd/node_add.go | 4 +- cmd/minikube/cmd/start_flags.go | 59 +- .../cloudshell_integration_tests_none.sh | 44 ++ hack/jenkins/linux_conformance_tests_kvm.sh | 39 ++ hack/preload-images/generate.go | 3 +- pkg/drivers/kic/types.go | 3 - pkg/minikube/bootstrapper/bsutil/files.go | 25 +- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 15 +- pkg/minikube/bootstrapper/bsutil/kubelet.go | 6 + .../bootstrapper/bsutil/kverify/node_ready.go | 2 +- .../bsutil/kverify/system_pods.go | 18 +- .../testdata/v1.12/containerd-api-port.yaml | 2 +- .../bsutil/testdata/v1.12/containerd.yaml | 2 +- .../testdata/v1.12/crio-options-gates.yaml | 2 +- .../bsutil/testdata/v1.12/crio.yaml | 2 +- .../bsutil/testdata/v1.12/default.yaml | 2 +- .../bsutil/testdata/v1.12/dns.yaml | 2 +- .../testdata/v1.12/image-repository.yaml | 2 +- .../bsutil/testdata/v1.12/options.yaml | 2 +- .../testdata/v1.13/containerd-api-port.yaml | 2 +- .../bsutil/testdata/v1.13/containerd.yaml | 2 +- .../testdata/v1.13/crio-options-gates.yaml | 2 +- .../bsutil/testdata/v1.13/crio.yaml | 2 +- .../bsutil/testdata/v1.13/default.yaml | 2 +- .../bsutil/testdata/v1.13/dns.yaml | 2 +- .../testdata/v1.13/image-repository.yaml | 2 +- .../bsutil/testdata/v1.13/options.yaml | 2 +- .../testdata/v1.14/containerd-api-port.yaml | 4 +- .../bsutil/testdata/v1.14/containerd.yaml | 4 +- .../testdata/v1.14/crio-options-gates.yaml | 4 +- .../bsutil/testdata/v1.14/crio.yaml | 4 +- .../bsutil/testdata/v1.14/default.yaml | 4 +- .../bsutil/testdata/v1.14/dns.yaml | 4 +- .../testdata/v1.14/image-repository.yaml | 4 +- .../bsutil/testdata/v1.14/options.yaml | 4 +- .../testdata/v1.15/containerd-api-port.yaml | 4 +- .../bsutil/testdata/v1.15/containerd.yaml | 4 +- .../testdata/v1.15/crio-options-gates.yaml | 4 +- 
.../bsutil/testdata/v1.15/crio.yaml | 4 +- .../bsutil/testdata/v1.15/default.yaml | 4 +- .../bsutil/testdata/v1.15/dns.yaml | 4 +- .../testdata/v1.15/image-repository.yaml | 4 +- .../bsutil/testdata/v1.15/options.yaml | 4 +- .../testdata/v1.16/containerd-api-port.yaml | 4 +- .../bsutil/testdata/v1.16/containerd.yaml | 4 +- .../testdata/v1.16/crio-options-gates.yaml | 4 +- .../bsutil/testdata/v1.16/crio.yaml | 4 +- .../bsutil/testdata/v1.16/default.yaml | 4 +- .../bsutil/testdata/v1.16/dns.yaml | 4 +- .../testdata/v1.16/image-repository.yaml | 4 +- .../bsutil/testdata/v1.16/options.yaml | 4 +- .../testdata/v1.17/containerd-api-port.yaml | 4 +- .../bsutil/testdata/v1.17/containerd.yaml | 4 +- .../testdata/v1.17/crio-options-gates.yaml | 4 +- .../bsutil/testdata/v1.17/crio.yaml | 4 +- .../bsutil/testdata/v1.17/default.yaml | 4 +- .../bsutil/testdata/v1.17/dns.yaml | 4 +- .../testdata/v1.17/image-repository.yaml | 4 +- .../bsutil/testdata/v1.17/options.yaml | 4 +- .../testdata/v1.18/containerd-api-port.yaml | 4 +- .../bsutil/testdata/v1.18/containerd.yaml | 4 +- .../testdata/v1.18/crio-options-gates.yaml | 4 +- .../bsutil/testdata/v1.18/crio.yaml | 4 +- .../bsutil/testdata/v1.18/default.yaml | 4 +- .../bsutil/testdata/v1.18/dns.yaml | 4 +- .../testdata/v1.18/image-repository.yaml | 4 +- .../bsutil/testdata/v1.18/options.yaml | 4 +- .../testdata/v1.19/containerd-api-port.yaml | 4 +- .../bsutil/testdata/v1.19/containerd.yaml | 4 +- .../testdata/v1.19/crio-options-gates.yaml | 4 +- .../bsutil/testdata/v1.19/crio.yaml | 4 +- .../bsutil/testdata/v1.19/default.yaml | 4 +- .../bsutil/testdata/v1.19/dns.yaml | 4 +- .../testdata/v1.19/image-repository.yaml | 4 +- .../bsutil/testdata/v1.19/options.yaml | 4 +- pkg/minikube/bootstrapper/images/images.go | 9 + pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 91 +-- .../bootstrapper/kubeadm/kubeadm.go.rej | 16 + pkg/minikube/cluster/cluster.go | 11 +- pkg/minikube/cni/bridge.go | 83 +++ pkg/minikube/cni/cni.go | 160 +++++ 
pkg/minikube/cni/custom.go | 66 ++ pkg/minikube/cni/disabled.go | 51 ++ pkg/minikube/cni/flannel.go | 646 ++++++++++++++++++ .../kubeadm/default_cni.go => cni/kindnet.go} | 83 ++- pkg/minikube/config/config.go | 12 - pkg/minikube/config/profile.go | 6 - pkg/minikube/config/types.go | 12 +- pkg/minikube/cruntime/containerd.go | 5 - pkg/minikube/cruntime/crio.go | 5 - pkg/minikube/cruntime/cruntime.go | 2 - pkg/minikube/cruntime/docker.go | 5 - pkg/minikube/driver/driver.go | 3 - pkg/minikube/node/start.go | 18 +- pkg/minikube/out/style.go | 1 + pkg/minikube/out/style_enum.go | 1 + site/content/en/docs/commands/start.md | 5 +- test/integration/functional_test.go | 31 - test/integration/helpers_test.go | 1 + test/integration/net_test.go | 186 +++++ .../testdata/netcat-deployment-nomaster.yaml | 31 + .../testdata/netcat-deployment.yaml | 32 + test/integration/testdata/weavenet.yaml | 255 +++++++ 103 files changed, 1925 insertions(+), 344 deletions(-) create mode 100755 hack/jenkins/cloudshell_integration_tests_none.sh create mode 100755 hack/jenkins/linux_conformance_tests_kvm.sh create mode 100644 pkg/minikube/bootstrapper/kubeadm/kubeadm.go.rej create mode 100644 pkg/minikube/cni/bridge.go create mode 100644 pkg/minikube/cni/cni.go create mode 100644 pkg/minikube/cni/custom.go create mode 100644 pkg/minikube/cni/disabled.go create mode 100644 pkg/minikube/cni/flannel.go rename pkg/minikube/{bootstrapper/kubeadm/default_cni.go => cni/kindnet.go} (68%) create mode 100644 test/integration/net_test.go create mode 100644 test/integration/testdata/netcat-deployment-nomaster.yaml create mode 100644 test/integration/testdata/netcat-deployment.yaml create mode 100644 test/integration/testdata/weavenet.yaml diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 8e7280ae43..56fd1958fb 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -70,9 +70,7 @@ var nodeAddCmd = &cobra.Command{ } } - // Add CNI config if it's not already 
there - // We need to run kubeadm.init here as well - if err := config.MultiNodeCNIConfig(cc); err != nil { + if err := config.SaveProfile(cc.Name, cc); err != nil { exit.WithError("failed to save config", err) } diff --git a/cmd/minikube/cmd/start_flags.go b/cmd/minikube/cmd/start_flags.go index 64d5b96b1c..4ee4e71ea7 100644 --- a/cmd/minikube/cmd/start_flags.go +++ b/cmd/minikube/cmd/start_flags.go @@ -29,6 +29,7 @@ import ( "k8s.io/minikube/pkg/drivers/kic" "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil" "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify" + "k8s.io/minikube/pkg/minikube/cni" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" @@ -54,6 +55,7 @@ const ( criSocket = "cri-socket" networkPlugin = "network-plugin" enableDefaultCNI = "enable-default-cni" + cniFlag = "cni" hypervVirtualSwitch = "hyperv-virtual-switch" hypervUseExternalSwitch = "hyperv-use-external-switch" hypervExternalAdapter = "hyperv-external-adapter" @@ -130,8 +132,9 @@ func initMinikubeFlags() { startCmd.Flags().String(mountString, constants.DefaultMountDir+":/minikube-host", "The argument to pass the minikube mount command on start.") startCmd.Flags().StringArrayVar(&config.AddonList, "addons", nil, "Enable addons. see `minikube addons list` for a list of valid addon names.") startCmd.Flags().String(criSocket, "", "The cri socket path to be used.") - startCmd.Flags().String(networkPlugin, "", "The name of the network plugin.") - startCmd.Flags().Bool(enableDefaultCNI, false, "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \"--network-plugin=cni\".") + startCmd.Flags().String(networkPlugin, "", "Kubelet network plug-in to use (default: auto)") + startCmd.Flags().Bool(enableDefaultCNI, false, "DEPRECATED: Replaced by --cni=custom") + startCmd.Flags().String(cniFlag, "", "CNI plug-in to use. 
Valid options: auto, calico, custom, flannel, kindnet (default: auto)") startCmd.Flags().StringSlice(waitComponents, kverify.DefaultWaitList, fmt.Sprintf("comma separated list of Kubernetes components to verify and wait for after starting a cluster. defaults to %q, available options: %q . other acceptable values are 'all' or 'none', 'true' and 'false'", strings.Join(kverify.DefaultWaitList, ","), strings.Join(kverify.AllComponentsList, ","))) startCmd.Flags().Duration(waitTimeout, 6*time.Minute, "max time to wait per Kubernetes core services to be healthy.") startCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.") @@ -237,21 +240,6 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k exit.WithCodeT(exit.Config, "Generate unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err}) } - r, err := cruntime.New(cruntime.Config{Type: viper.GetString(containerRuntime)}) - if err != nil { - return cc, config.Node{}, errors.Wrap(err, "new runtime manager") - } - - // Pick good default values for --network-plugin and --enable-default-cni based on runtime. 
- selectedEnableDefaultCNI := viper.GetBool(enableDefaultCNI) - selectedNetworkPlugin := viper.GetString(networkPlugin) - if r.DefaultCNI() && !cmd.Flags().Changed(networkPlugin) { - selectedNetworkPlugin = "cni" - if !cmd.Flags().Changed(enableDefaultCNI) { - selectedEnableDefaultCNI = true - } - } - repository := viper.GetString(imageRepository) mirrorCountry := strings.ToLower(viper.GetString(imageMirrorCountry)) if strings.ToLower(repository) == "auto" || (mirrorCountry != "" && repository == "") { @@ -275,6 +263,13 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k out.T(out.SuccessType, "Using image repository {{.name}}", out.V{"name": repository}) } + // Backwards compatibility with --enable-default-cni + chosenCNI := viper.GetString(cniFlag) + if viper.GetBool(enableDefaultCNI) && !cmd.Flags().Changed(cniFlag) { + glog.Errorf("Found deprecated --enable-default-cni flag, setting --cni=bridge") + chosenCNI = "bridge" + } + cc = config.ClusterConfig{ Name: ClusterFlagValue(), KeepContext: viper.GetBool(keepContext), @@ -318,16 +313,26 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k FeatureGates: viper.GetString(featureGates), ContainerRuntime: viper.GetString(containerRuntime), CRISocket: viper.GetString(criSocket), - NetworkPlugin: selectedNetworkPlugin, + NetworkPlugin: viper.GetString(networkPlugin), ServiceCIDR: viper.GetString(serviceCIDR), ImageRepository: repository, ExtraOptions: config.ExtraOptions, ShouldLoadCachedImages: viper.GetBool(cacheImages), - EnableDefaultCNI: selectedEnableDefaultCNI, + CNI: chosenCNI, NodePort: viper.GetInt(apiServerPort), }, } cc.VerifyComponents = interpretWaitFlag(*cmd) + + cnm, err := cni.New(cc) + if err != nil { + return cc, config.Node{}, errors.Wrap(err, "cni") + } + + if _, ok := cnm.(cni.Disabled); !ok { + glog.Infof("Found %q CNI - setting NetworkPlugin=cni", cnm) + cc.KubernetesConfig.NetworkPlugin = "cni" + } } r, err := 
cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime}) @@ -354,6 +359,7 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC validateFlags(cmd, existing.Driver) cc := *existing + if cmd.Flags().Changed(containerRuntime) { cc.KubernetesConfig.ContainerRuntime = viper.GetString(containerRuntime) } @@ -514,10 +520,6 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC cc.KubernetesConfig.CRISocket = viper.GetString(criSocket) } - if cmd.Flags().Changed(criSocket) { - cc.KubernetesConfig.NetworkPlugin = viper.GetString(criSocket) - } - if cmd.Flags().Changed(networkPlugin) { cc.KubernetesConfig.NetworkPlugin = viper.GetString(networkPlugin) } @@ -534,8 +536,15 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC cc.KubernetesConfig.ImageRepository = viper.GetString(imageRepository) } - if cmd.Flags().Changed(enableDefaultCNI) { - cc.KubernetesConfig.EnableDefaultCNI = viper.GetBool(enableDefaultCNI) + if cmd.Flags().Changed(enableDefaultCNI) && !cmd.Flags().Changed(cniFlag) { + if viper.GetBool(enableDefaultCNI) { + glog.Errorf("Found deprecated --enable-default-cni flag, setting --cni=bridge") + cc.KubernetesConfig.CNI = "bridge" + } + } + + if cmd.Flags().Changed(cniFlag) { + cc.KubernetesConfig.CNI = viper.GetString(cniFlag) } if cmd.Flags().Changed(waitComponents) { diff --git a/hack/jenkins/cloudshell_integration_tests_none.sh b/hack/jenkins/cloudshell_integration_tests_none.sh new file mode 100755 index 0000000000..e888c9bfbc --- /dev/null +++ b/hack/jenkins/cloudshell_integration_tests_none.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This script runs the integration tests on a Linux machine for the none Driver + +# The script expects the following env variables: +# MINIKUBE_LOCATION: GIT_COMMIT from upstream build. +# COMMIT: Actual commit ID from upstream build +# EXTRA_BUILD_ARGS (optional): Extra args to be passed into the minikube integrations tests +# access_token: The Github API access token. Injected by the Jenkins credential provider. + + +set -e + +OS_ARCH="linux-amd64" +VM_DRIVER="none" +JOB_NAME="none_cloudshell" + + + +SUDO_PREFIX="sudo -E " +export KUBECONFIG="/root/.kube/config" + +gcloud alpha cloud-shell ssh --boosted "uptime" +gcloud alpha cloud-shell scp --boosted "" + +mkdir -p cron && gsutil -m rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES" +sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP" + +source ./common.sh diff --git a/hack/jenkins/linux_conformance_tests_kvm.sh b/hack/jenkins/linux_conformance_tests_kvm.sh new file mode 100755 index 0000000000..65038e0cdb --- /dev/null +++ b/hack/jenkins/linux_conformance_tests_kvm.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This script runs the integration tests on a Linux machine for the KVM Driver + +# The script expects the following env variables: +# MINIKUBE_LOCATION: GIT_COMMIT from upstream build. +# COMMIT: Actual commit ID from upstream build +# EXTRA_BUILD_ARGS (optional): Extra args to be passed into the minikube integrations tests +# access_token: The Github API access token. Injected by the Jenkins credential provider. + +set -e + +OS_ARCH="linux-amd64" +VM_DRIVER="kvm2" +JOB_NAME="KVM_Linux" +EXPECTED_DEFAULT_DRIVER="kvm2" + +# We pick kvm as our gvisor testbed because it is fast & reliable +EXTRA_TEST_ARGS="-gvisor" + +mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES" +sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP" + +source ./common.sh diff --git a/hack/preload-images/generate.go b/hack/preload-images/generate.go index b7267f74d9..572352cbe8 100644 --- a/hack/preload-images/generate.go +++ b/hack/preload-images/generate.go @@ -65,8 +65,9 @@ func generateTarball(kubernetesVersion, containerRuntime, tarballFilename string if err != nil { return errors.Wrap(err, "kubeadm images") } + if containerRuntime != "docker" { // kic overlay image is only needed by containerd and cri-o https://github.com/kubernetes/minikube/issues/7428 - imgs = append(imgs, kic.OverlayImage) + imgs = append(imgs, images.KindNet("")) } runner := command.NewKICRunner(profile, driver.OCIBinary) diff --git a/pkg/drivers/kic/types.go 
b/pkg/drivers/kic/types.go index 5d189885ad..99acc614b5 100644 --- a/pkg/drivers/kic/types.go +++ b/pkg/drivers/kic/types.go @@ -27,9 +27,6 @@ const ( Version = "v0.0.10" // SHA of the kic base image baseImageSHA = "f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438" - // OverlayImage is the cni plugin used for overlay image, created by kind. - // CNI plugin image used for kic drivers created by kind. - OverlayImage = "kindest/kindnetd:0.5.4" ) var ( diff --git a/pkg/minikube/bootstrapper/bsutil/files.go b/pkg/minikube/bootstrapper/bsutil/files.go index c18cb0452c..28e91dff4c 100644 --- a/pkg/minikube/bootstrapper/bsutil/files.go +++ b/pkg/minikube/bootstrapper/bsutil/files.go @@ -18,8 +18,12 @@ limitations under the License. package bsutil import ( + "os/exec" "path" + "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube/assets" + "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/vmpath" ) @@ -27,8 +31,6 @@ import ( var KubeadmYamlPath = path.Join(vmpath.GuestEphemeralDir, "kubeadm.yaml") const ( - //DefaultCNIConfigPath is the configuration file for CNI networks - DefaultCNIConfigPath = "/etc/cni/net.d/1-k8s.conf" // KubeletServiceFile is the file for the systemd kubelet.service KubeletServiceFile = "/lib/systemd/system/kubelet.service" // KubeletSystemdConfFile is config for the systemd kubelet.service @@ -38,3 +40,22 @@ const ( // KubeletInitPath is where Sys-V style init script is installed KubeletInitPath = "/etc/init.d/kubelet" ) + +// CopyFiles combines mkdir requests into a single call to reduce load +func CopyFiles(runner command.Runner, files []assets.CopyableFile) error { + dirs := []string{} + for _, f := range files { + dirs = append(dirs, f.GetTargetDir()) + } + args := append([]string{"mkdir", "-p"}, dirs...) 
+ if _, err := runner.RunCmd(exec.Command("sudo", args...)); err != nil { + return errors.Wrap(err, "mkdir") + } + + for _, f := range files { + if err := runner.Copy(f); err != nil { + return errors.Wrapf(err, "copy") + } + } + return nil +} diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 155cd1d494..8a9ef7e713 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -26,6 +26,7 @@ import ( "github.com/golang/glog" "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/ktmpl" + "k8s.io/minikube/pkg/minikube/cni" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" @@ -65,6 +66,18 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana return nil, errors.Wrap(err, "generating extra component config for kubeadm") } + cnm, err := cni.New(cc) + if err != nil { + return nil, errors.Wrap(err, "cni") + } + + podCIDR := cnm.CIDR() + overrideCIDR := k8s.ExtraOptions.Get("pod-network-cidr", Kubeadm) + if overrideCIDR != "" { + podCIDR = overrideCIDR + } + glog.Infof("Using pod CIDR: %s", podCIDR) + opts := struct { CertDir string ServiceCIDR string @@ -87,7 +100,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana }{ CertDir: vmpath.GuestKubernetesCertsDir, ServiceCIDR: constants.DefaultServiceCIDR, - PodSubnet: k8s.ExtraOptions.Get("pod-network-cidr", Kubeadm), + PodSubnet: podCIDR, AdvertiseAddress: n.IP, APIServerPort: nodePort, KubernetesVersion: k8s.KubernetesVersion, diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet.go b/pkg/minikube/bootstrapper/bsutil/kubelet.go index d1af43e9ec..4b663f4890 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet.go @@ -25,6 +25,7 @@ import ( "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/ktmpl" 
"k8s.io/minikube/pkg/minikube/bootstrapper/images" + "k8s.io/minikube/pkg/minikube/cni" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/driver" @@ -53,7 +54,12 @@ func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manage } if k8s.NetworkPlugin != "" { extraOpts["network-plugin"] = k8s.NetworkPlugin + + if k8s.NetworkPlugin == "kubenet" { + extraOpts["pod-cidr"] = cni.DefaultPodCIDR + } } + if _, ok := extraOpts["node-ip"]; !ok { extraOpts["node-ip"] = nc.IP } diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/node_ready.go b/pkg/minikube/bootstrapper/bsutil/kverify/node_ready.go index 1f2863c537..cfb25b8222 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/node_ready.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/node_ready.go @@ -32,7 +32,7 @@ import ( // WaitForNodeReady waits till kube client reports node status as "ready" func WaitForNodeReady(cs *kubernetes.Clientset, timeout time.Duration) error { - glog.Info("waiting for node status to be ready ...") + glog.Infof("waiting %s for node status to be ready ...", timeout) start := time.Now() defer func() { glog.Infof("duration metric: took %s to wait for WaitForNodeReady...", time.Since(start)) diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/system_pods.go b/pkg/minikube/bootstrapper/bsutil/kverify/system_pods.go index 68cb52c5d2..bd03ecec5b 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/system_pods.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/system_pods.go @@ -36,6 +36,7 @@ import ( "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/logs" "k8s.io/minikube/pkg/minikube/sysinit" + "k8s.io/minikube/pkg/util/retry" ) // WaitForSystemPods verifies essential pods for running kurnetes is running @@ -83,11 +84,15 @@ func ExpectAppsRunning(cs *kubernetes.Clientset, expected []string) error { if err != nil { return err } + glog.Infof("%d kube-system pods found", len(pods.Items)) for _, pod := 
range pods.Items { + glog.Infof(podStatusMsg(pod)) + if pod.Status.Phase != core.PodRunning { continue } + for k, v := range pod.ObjectMeta.Labels { if k == "component" || k == "k8s-app" { found[v] = true @@ -112,15 +117,16 @@ func WaitForAppsRunning(cs *kubernetes.Clientset, expected []string, timeout tim glog.Info("waiting for k8s-apps to be running ...") start := time.Now() - checkRunning := func() (bool, error) { - if err := ExpectAppsRunning(cs, expected); err != nil { - return false, nil + checkRunning := func() error { + err := ExpectAppsRunning(cs, expected) + if err != nil { + glog.Warningf("expect apps running failed: %v", err) } - return true, nil + return err } - if err := wait.PollImmediate(kconst.APICallRetryInterval, timeout, checkRunning); err != nil { - return errors.Wrapf(err, "checking k8s-apps to be running") + if err := retry.Expo(checkRunning, kconst.APICallRetryInterval, timeout); err != nil { + return errors.Wrapf(err, "expected k8s-apps") } glog.Infof("duration metric: took %s to wait for k8s-apps to be running ...", time.Since(start)) return nil diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-api-port.yaml index 9bf9a7d5b7..32d980a937 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-api-port.yaml @@ -35,7 +35,7 @@ schedulerExtraArgs: kubernetesVersion: v1.12.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd.yaml index dfc17e6b0d..01518ed1ea 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd.yaml @@ -35,7 +35,7 
@@ schedulerExtraArgs: kubernetesVersion: v1.12.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio-options-gates.yaml index 38f448fa36..29f9650f36 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio-options-gates.yaml @@ -43,7 +43,7 @@ schedulerExtraArgs: kubernetesVersion: v1.12.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio.yaml index 61ed381d2e..51390464ca 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio.yaml @@ -35,7 +35,7 @@ schedulerExtraArgs: kubernetesVersion: v1.12.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/default.yaml index e4bcbe4230..8ff00118e4 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/default.yaml @@ -35,7 +35,7 @@ schedulerExtraArgs: kubernetesVersion: v1.12.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/dns.yaml index 37db5684cb..9dcfab0105 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/dns.yaml @@ -35,7 +35,7 @@ schedulerExtraArgs: kubernetesVersion: v1.12.0 networking: dnsDomain: 1.1.1.1 - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/image-repository.yaml index fe2ca6c928..3c3861f781 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/image-repository.yaml @@ -36,7 +36,7 @@ schedulerExtraArgs: kubernetesVersion: v1.12.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/options.yaml index 5c2a546a95..8c83156414 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/options.yaml @@ -40,7 +40,7 @@ schedulerExtraArgs: kubernetesVersion: v1.12.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-api-port.yaml index 7d565812c7..b3cbfd3006 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-api-port.yaml @@ -35,7 +35,7 @@ schedulerExtraArgs: kubernetesVersion: v1.13.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: 
kubelet.config.k8s.io/v1beta1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd.yaml index 1ba62522b0..4dc856ddbd 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd.yaml @@ -35,7 +35,7 @@ schedulerExtraArgs: kubernetesVersion: v1.13.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio-options-gates.yaml index 5220df087a..2f4e4276d2 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio-options-gates.yaml @@ -43,7 +43,7 @@ schedulerExtraArgs: kubernetesVersion: v1.13.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio.yaml index a9371f715a..9340dc30eb 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio.yaml @@ -35,7 +35,7 @@ schedulerExtraArgs: kubernetesVersion: v1.13.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/default.yaml index be782769c7..dcc9d5f8ed 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/default.yaml @@ -35,7 +35,7 @@ 
schedulerExtraArgs: kubernetesVersion: v1.13.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/dns.yaml index a93c4ea75c..33cb24bbed 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/dns.yaml @@ -35,7 +35,7 @@ schedulerExtraArgs: kubernetesVersion: v1.13.0 networking: dnsDomain: 1.1.1.1 - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/image-repository.yaml index b8a3c2049d..24bcc1fc00 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/image-repository.yaml @@ -36,7 +36,7 @@ schedulerExtraArgs: kubernetesVersion: v1.13.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/options.yaml index 5d6ff553d5..648f8118fb 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/options.yaml @@ -40,7 +40,7 @@ schedulerExtraArgs: kubernetesVersion: v1.13.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml index 702719fd37..dcd523eba3 100644 
--- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml @@ -42,7 +42,7 @@ scheduler: kubernetesVersion: v1.14.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -58,5 +58,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml index 91e65cd691..e86e0deb1b 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml @@ -42,7 +42,7 @@ scheduler: kubernetesVersion: v1.14.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -58,5 +58,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml index 7413507682..cb8d893224 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml @@ -52,7 +52,7 @@ scheduler: kubernetesVersion: v1.14.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -68,6 +68,6 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: 
"10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 mode: "iptables" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml index e792f790ff..4497bdfe18 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml @@ -42,7 +42,7 @@ scheduler: kubernetesVersion: v1.14.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -58,5 +58,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml index 0fe77722f2..4a92ebe379 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml @@ -42,7 +42,7 @@ scheduler: kubernetesVersion: v1.14.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -58,5 +58,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml index 29fbc7f95c..6ec12d5789 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml @@ -42,7 +42,7 @@ scheduler: kubernetesVersion: v1.14.0 networking: dnsDomain: 1.1.1.1 - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -58,5 +58,5 @@ 
failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml index c1e24e8977..19cc68b3cb 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml @@ -43,7 +43,7 @@ scheduler: kubernetesVersion: v1.14.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -59,5 +59,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml index 7b444b1104..ae37586beb 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml @@ -49,7 +49,7 @@ scheduler: kubernetesVersion: v1.14.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -65,6 +65,6 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 mode: "iptables" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml index 58fa215e13..08677e6a76 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml +++ 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml @@ -42,7 +42,7 @@ scheduler: kubernetesVersion: v1.15.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -58,5 +58,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml index ad7958b661..8cc0aa6b34 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml @@ -42,7 +42,7 @@ scheduler: kubernetesVersion: v1.15.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -58,5 +58,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml index bbc533e065..c9ef244d85 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml @@ -52,7 +52,7 @@ scheduler: kubernetesVersion: v1.15.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -68,6 +68,6 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 mode: "iptables" diff --git 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml index 2a9df59835..ac9503b21d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml @@ -42,7 +42,7 @@ scheduler: kubernetesVersion: v1.15.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -58,5 +58,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml index e99a5ccbbe..24ca4c24e3 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml @@ -42,7 +42,7 @@ scheduler: kubernetesVersion: v1.15.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -58,5 +58,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml index d6f8ff36cf..7f74ab0d18 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml @@ -42,7 +42,7 @@ scheduler: kubernetesVersion: v1.15.0 networking: dnsDomain: 1.1.1.1 - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -58,5 +58,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: 
KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml index 571997a3e2..12e1db36a6 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml @@ -43,7 +43,7 @@ scheduler: kubernetesVersion: v1.15.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -59,5 +59,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml index 0c66800678..066a4215eb 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml @@ -49,7 +49,7 @@ scheduler: kubernetesVersion: v1.15.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -65,6 +65,6 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 mode: "iptables" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml index 2e67e99072..afc655fa4a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml @@ -42,7 +42,7 @@ scheduler: kubernetesVersion: v1.16.0 
networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -58,5 +58,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml index 61a1e30a0b..e7b6c4ca6a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml @@ -42,7 +42,7 @@ scheduler: kubernetesVersion: v1.16.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -58,5 +58,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml index ef917ad219..1e51884514 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml @@ -52,7 +52,7 @@ scheduler: kubernetesVersion: v1.16.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -68,6 +68,6 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 mode: "iptables" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml index 
e013617183..9a312ba910 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml @@ -42,7 +42,7 @@ scheduler: kubernetesVersion: v1.16.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -58,5 +58,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml index ad9ee401f3..92eee59c65 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml @@ -42,7 +42,7 @@ scheduler: kubernetesVersion: v1.16.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -58,5 +58,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml index a4763e7315..33e9d57349 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml @@ -42,7 +42,7 @@ scheduler: kubernetesVersion: v1.16.0 networking: dnsDomain: 1.1.1.1 - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -58,5 +58,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml index 659d042c69..f9776edf9e 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml @@ -43,7 +43,7 @@ scheduler: kubernetesVersion: v1.16.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -59,5 +59,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml index cc33fa8b07..0805d9b79c 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml @@ -49,7 +49,7 @@ scheduler: kubernetesVersion: v1.16.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -65,6 +65,6 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 mode: "iptables" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml index 8b56135fb4..969ae5e003 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml @@ -40,7 +40,7 @@ scheduler: kubernetesVersion: v1.17.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- 
apiVersion: kubelet.config.k8s.io/v1beta1 @@ -56,5 +56,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml index 09bea3de02..0d031935cd 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml @@ -40,7 +40,7 @@ scheduler: kubernetesVersion: v1.17.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -56,5 +56,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml index 7e412f6154..13880f8c54 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml @@ -50,7 +50,7 @@ scheduler: kubernetesVersion: v1.17.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -66,6 +66,6 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 mode: "iptables" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml index 146d43fc09..f2779f7eb8 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml +++ 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml @@ -40,7 +40,7 @@ scheduler: kubernetesVersion: v1.17.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -56,5 +56,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml index 47726bf424..bf91176d5b 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml @@ -40,7 +40,7 @@ scheduler: kubernetesVersion: v1.17.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -56,5 +56,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml index c3887481fc..180bbb42db 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml @@ -40,7 +40,7 @@ scheduler: kubernetesVersion: v1.17.0 networking: dnsDomain: 1.1.1.1 - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -56,5 +56,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml index 8a9df623b4..afb3d99cb4 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml @@ -41,7 +41,7 @@ scheduler: kubernetesVersion: v1.17.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -57,5 +57,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml index 1905b61add..379634cdf8 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml @@ -47,7 +47,7 @@ scheduler: kubernetesVersion: v1.17.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -63,6 +63,6 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 mode: "iptables" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml index 14fa7eeecf..d82d733a4f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml @@ -40,7 +40,7 @@ scheduler: kubernetesVersion: v1.18.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -56,5 +56,5 @@ failSwapOn: 
false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml index 14061aeee3..69e9fd8882 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml @@ -40,7 +40,7 @@ scheduler: kubernetesVersion: v1.18.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -56,5 +56,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml index e6881e74d4..773823618d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml @@ -50,7 +50,7 @@ scheduler: kubernetesVersion: v1.18.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -66,6 +66,6 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 mode: "iptables" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml index 945d96bfa4..c500c65a29 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml @@ -40,7 +40,7 @@ scheduler: 
kubernetesVersion: v1.18.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -56,5 +56,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml index 3a02b253e0..56bfa738f5 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml @@ -40,7 +40,7 @@ scheduler: kubernetesVersion: v1.18.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -56,5 +56,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml index 1e9f0f799f..11c267c4a9 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml @@ -40,7 +40,7 @@ scheduler: kubernetesVersion: v1.18.0 networking: dnsDomain: 1.1.1.1 - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -56,5 +56,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml index aeee10217a..5484739ddf 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml @@ -41,7 +41,7 @@ scheduler: kubernetesVersion: v1.18.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -57,5 +57,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml index 6c0802c98a..059ab4d371 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml @@ -47,7 +47,7 @@ scheduler: kubernetesVersion: v1.18.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -63,6 +63,6 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 mode: "iptables" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml index 03d4f7d08e..415cf6cf61 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml @@ -40,7 +40,7 @@ scheduler: kubernetesVersion: v1.19.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -56,5 +56,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: 
"10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml index 749f97330b..db906e8825 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml @@ -40,7 +40,7 @@ scheduler: kubernetesVersion: v1.19.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -56,5 +56,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml index 90bbedc541..7e60806d92 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml @@ -50,7 +50,7 @@ scheduler: kubernetesVersion: v1.19.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -66,6 +66,6 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 mode: "iptables" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml index 28fc882e48..9abb40d7c8 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml @@ -40,7 +40,7 @@ scheduler: kubernetesVersion: v1.19.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 
10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -56,5 +56,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml index 3d304d5338..2be2c468d4 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml @@ -40,7 +40,7 @@ scheduler: kubernetesVersion: v1.19.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -56,5 +56,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml index cde5f7d4a6..bdf855e89c 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml @@ -40,7 +40,7 @@ scheduler: kubernetesVersion: v1.19.0 networking: dnsDomain: 1.1.1.1 - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -56,5 +56,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml index f4ec663e82..cb38e0d61c 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml +++ 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml @@ -41,7 +41,7 @@ scheduler: kubernetesVersion: v1.19.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -57,5 +57,5 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml index 3e1a3e802f..7230f8180d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml @@ -47,7 +47,7 @@ scheduler: kubernetesVersion: v1.19.0 networking: dnsDomain: cluster.local - podSubnet: "" + podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 @@ -63,6 +63,6 @@ failSwapOn: false --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: "" +clusterCIDR: "10.244.0.0/16" metricsBindAddress: 1.1.1.1:10249 mode: "iptables" diff --git a/pkg/minikube/bootstrapper/images/images.go b/pkg/minikube/bootstrapper/images/images.go index addc83baa8..c243d20f91 100644 --- a/pkg/minikube/bootstrapper/images/images.go +++ b/pkg/minikube/bootstrapper/images/images.go @@ -121,6 +121,7 @@ func auxiliary(mirror string) []string { storageProvisioner(mirror), dashboardFrontend(mirror), dashboardMetrics(mirror), + // NOTE: kindnet is also used when the Docker driver is used with a non-Docker runtime } } @@ -146,3 +147,11 @@ func dashboardMetrics(repo string) string { // See 'dashboard-metrics-scraper' in deploy/addons/dashboard/dashboard-dp.yaml return path.Join(repo, "metrics-scraper:v1.0.2") } + +// KindNet returns the image used for kindnet +func KindNet(repo string) string { + if repo == "" { + repo = "kindest" + 
} + return path.Join(repo, "kindnetd:0.5.4") +} diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 2e0cb55e3e..2c48fd147e 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -17,7 +17,6 @@ limitations under the License. package kubeadm import ( - "bytes" "context" "os/exec" "path" @@ -41,7 +40,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants" - "k8s.io/minikube/pkg/drivers/kic" "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/kapi" "k8s.io/minikube/pkg/minikube/assets" @@ -49,6 +47,7 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil" "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify" "k8s.io/minikube/pkg/minikube/bootstrapper/images" + "k8s.io/minikube/pkg/minikube/cni" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" @@ -231,6 +230,25 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error { return errors.Wrap(err, "run") } + cnm, err := cni.New(cfg) + if err != nil { + return errors.Wrap(err, "cni config") + } + + if _, ok := cnm.(cni.Disabled); !ok { + out.T(out.CNI, "Configuring {{.name}} (Container Networking Interface) ...", out.V{"name": cnm.String()}) + + if err := cnm.Apply(k.c, []cni.Runner{k.c}); err != nil { + return errors.Wrap(err, "cni apply") + } + + if cfg.KubernetesConfig.ContainerRuntime == "crio" { + if err := sysinit.New(k.c).Restart("crio"); err != nil { + glog.Errorf("failed to restart CRI: %v", err) + } + } + } + var wg sync.WaitGroup wg.Add(3) @@ -239,12 +257,6 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error { if err := k.elevateKubeSystemPrivileges(cfg); err != nil { glog.Errorf("unable to create cluster role binding, some addons might not work: %v", err) } - // the overlay is required for containerd and cri-o runtime: 
see #7428 - if config.MultiNode(cfg) || (driver.IsKIC(cfg.Driver) && cfg.KubernetesConfig.ContainerRuntime != "docker") { - if err := k.applyKICOverlay(cfg); err != nil { - glog.Errorf("failed to apply kic overlay: %v", err) - } - } wg.Done() }() @@ -769,13 +781,6 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru files = append(files, assets.NewMemoryAssetTarget(kubeadmCfg, bsutil.KubeadmYamlPath+".new", "0640")) } - // Copy the default CNI config (k8s.conf), so that kubelet can successfully - // start a Pod in the case a user hasn't manually installed any CNI plugin - // and minikube was started with "--extra-config=kubelet.network-plugin=cni". - if cfg.KubernetesConfig.EnableDefaultCNI && !config.MultiNode(cfg) { - files = append(files, assets.NewMemoryAssetTarget([]byte(defaultCNIConfig), bsutil.DefaultCNIConfigPath, "0644")) - } - // Installs compatibility shims for non-systemd environments kubeletPath := path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubelet") shims, err := sm.GenerateInitShim("kubelet", kubeletPath, bsutil.KubeletSystemdConfFile) @@ -784,7 +789,7 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru } files = append(files, shims...) - if err := copyFiles(k.c, files); err != nil { + if err := bsutil.CopyFiles(k.c, files); err != nil { return errors.Wrap(err, "copy") } @@ -800,65 +805,11 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru return sm.Start("kubelet") } -func copyFiles(runner command.Runner, files []assets.CopyableFile) error { - // Combine mkdir request into a single call to reduce load - dirs := []string{} - for _, f := range files { - dirs = append(dirs, f.GetTargetDir()) - } - args := append([]string{"mkdir", "-p"}, dirs...) 
- if _, err := runner.RunCmd(exec.Command("sudo", args...)); err != nil { - return errors.Wrap(err, "mkdir") - } - - for _, f := range files { - if err := runner.Copy(f); err != nil { - return errors.Wrapf(err, "copy") - } - } - return nil -} - // kubectlPath returns the path to the kubelet func kubectlPath(cfg config.ClusterConfig) string { return path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl") } -// applyKICOverlay applies the CNI plugin needed to make kic work -func (k *Bootstrapper) applyKICOverlay(cfg config.ClusterConfig) error { - b := bytes.Buffer{} - if err := kicCNIConfig.Execute(&b, struct{ ImageName string }{ImageName: kic.OverlayImage}); err != nil { - return err - } - - ko := path.Join(vmpath.GuestEphemeralDir, "kic_overlay.yaml") - f := assets.NewMemoryAssetTarget(b.Bytes(), ko, "0644") - - if err := k.c.Copy(f); err != nil { - return errors.Wrapf(err, "copy") - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - cmd := exec.CommandContext(ctx, "sudo", kubectlPath(cfg), "apply", - fmt.Sprintf("--kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "kubeconfig")), - "-f", ko) - - if rr, err := k.c.RunCmd(cmd); err != nil { - return errors.Wrapf(err, "cmd: %s output: %s", rr.Command(), rr.Output()) - } - - // Inform cri-o that the CNI has changed - if cfg.KubernetesConfig.ContainerRuntime == "crio" { - if err := sysinit.New(k.c).Restart("crio"); err != nil { - return errors.Wrap(err, "restart crio") - } - } - - return nil -} - // applyNodeLabels applies minikube labels to all the nodes func (k *Bootstrapper) applyNodeLabels(cfg config.ClusterConfig) error { // time cluster was created. 
time format is based on ISO 8601 (RFC 3339) diff --git a/pkg/minikube/cluster/cluster.go b/pkg/minikube/cluster/cluster.go index aa2fbbe6df..4050be5ee8 100644 --- a/pkg/minikube/cluster/cluster.go +++ b/pkg/minikube/cluster/cluster.go @@ -62,19 +62,20 @@ func Bootstrapper(api libmachine.API, bootstrapperName string, cc config.Cluster } // ControlPlaneBootstrapper returns the bootstrapper for the cluster's control plane -func ControlPlaneBootstrapper(mAPI libmachine.API, cc *config.ClusterConfig, bootstrapperName string) (bootstrapper.Bootstrapper, error) { +func ControlPlaneBootstrapper(mAPI libmachine.API, cc *config.ClusterConfig, bootstrapperName string) (bootstrapper.Bootstrapper, command.Runner, error) { cp, err := config.PrimaryControlPlane(cc) if err != nil { - return nil, errors.Wrap(err, "getting primary control plane") + return nil, nil, errors.Wrap(err, "getting primary control plane") } h, err := machine.LoadHost(mAPI, driver.MachineName(*cc, cp)) if err != nil { - return nil, errors.Wrap(err, "getting control plane host") + return nil, nil, errors.Wrap(err, "getting control plane host") } cpr, err := machine.CommandRunner(h) if err != nil { - return nil,
errors.Wrap(err, "getting control plane command runner") + return nil, nil, errors.Wrap(err, "getting control plane command runner") } - return Bootstrapper(mAPI, bootstrapperName, *cc, cpr) + bs, err := Bootstrapper(mAPI, bootstrapperName, *cc, cpr) + return bs, cpr, err } diff --git a/pkg/minikube/cni/bridge.go b/pkg/minikube/cni/bridge.go new file mode 100644 index 0000000000..0340b92fc9 --- /dev/null +++ b/pkg/minikube/cni/bridge.go @@ -0,0 +1,83 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cni + +import ( + "bytes" + "text/template" + + "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube/assets" + "k8s.io/minikube/pkg/minikube/config" +) + +// bridge is what minikube defaulted to when `--enable-default-cni=true` +// https://github.com/containernetworking/plugins/blob/master/plugins/main/bridge/README.md + +var bridgeConf = template.Must(template.New("bridge").Parse(` +{ + "cniVersion": "0.3.1", + "name": "bridge", + "type": "bridge", + "bridge": "bridge", + "addIf": "true", + "isDefaultGateway": true, + "forceAddress": false, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "subnet": "{{.PodCIDR}}" + } +} +`)) + +// Bridge is a CNI manager that installs the default bridge netconf +type Bridge struct { + cc config.ClusterConfig +} + +// String returns a string representation of this CNI +func (c Bridge) String() string { + return "Bridge CNI" +} + +func (c Bridge) netconf() (assets.CopyableFile, error) { + input := &tmplInput{PodCIDR: DefaultPodCIDR} + + b := bytes.Buffer{} + if err := bridgeConf.Execute(&b, input); err != nil { + return nil, err + } + + return assets.NewMemoryAssetTarget(b.Bytes(), "/etc/cni/net.d/1-k8s.conf", "0644"), nil +} + +// Apply enables the CNI +func (c Bridge) Apply(_ Runner, nodes []Runner) error { + f, err := c.netconf() + if err != nil { + return errors.Wrap(err, "netconf") + } + + return applyNetConf(nodes, f) +} + +// CIDR returns the default CIDR used by this CNI +func (c Bridge) CIDR() string { + return DefaultPodCIDR +} diff --git a/pkg/minikube/cni/cni.go b/pkg/minikube/cni/cni.go new file mode 100644 index 0000000000..947ac8d20f --- /dev/null +++ b/pkg/minikube/cni/cni.go @@ -0,0 +1,160 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cni configures the Container Networking Interface +package cni + +import ( + "context" + "fmt" + "os/exec" + "path" + "time" + + "github.com/golang/glog" + "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube/assets" + "k8s.io/minikube/pkg/minikube/command" + "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/driver" + "k8s.io/minikube/pkg/minikube/vmpath" +) + +const ( + // DefaultPodCIDR is the default CIDR to use in minikube CNI's. + DefaultPodCIDR = "10.244.0.0/16" +) + +// Runner is the subset of command.Runner this package consumes +type Runner interface { + RunCmd(cmd *exec.Cmd) (*command.RunResult, error) + Copy(assets.CopyableFile) error +} + +// Manager is a common interface for CNI +type Manager interface { + // Enable enables the CNI + Apply(Runner, []Runner) error + + // CIDR returns the default CIDR used by this CNI + CIDR() string + + // String representation + String() string +} + +// tmplInputs are inputs to CNI templates +type tmplInput struct { + ImageName string + PodCIDR string + DefaultRoute string +} + +// New returns a new CNI manager +func New(cc config.ClusterConfig) (Manager, error) { + if cc.KubernetesConfig.NetworkPlugin != "" && cc.KubernetesConfig.NetworkPlugin != "cni" { + glog.Infof("network plugin configured as %q, returning disabled", cc.KubernetesConfig.NetworkPlugin) + return Disabled{}, nil + } + + glog.Infof("Creating CNI manager for %q", cc.KubernetesConfig.CNI) + + switch cc.KubernetesConfig.CNI { + case "", "auto": + return chooseDefault(cc), nil + case "false": + return Disabled{cc: cc}, 
nil + case "kindnet", "true": + return KindNet{cc: cc}, nil + case "bridge": + return Bridge{cc: cc}, nil + case "flannel": + return Flannel{cc: cc}, nil + default: + return NewCustom(cc, cc.KubernetesConfig.CNI) + } +} + +func chooseDefault(cc config.ClusterConfig) Manager { + // For backwards compatibility with older profiles using --enable-default-cni + if cc.KubernetesConfig.EnableDefaultCNI { + glog.Infof("EnableDefaultCNI is true, recommending bridge") + return Bridge{} + } + + if cc.KubernetesConfig.ContainerRuntime != "docker" { + if driver.IsKIC(cc.Driver) { + glog.Infof("%q driver + %s runtime found, recommending kindnet", cc.Driver, cc.KubernetesConfig.ContainerRuntime) + return KindNet{cc: cc} + } + glog.Infof("%q driver + %s runtime found, recommending bridge", cc.Driver, cc.KubernetesConfig.ContainerRuntime) + return Bridge{cc: cc} + } + + if len(cc.Nodes) > 1 { + glog.Infof("%d nodes found, recommending kindnet", len(cc.Nodes)) + return KindNet{cc: cc} + } + + glog.Infof("CNI unnecessary in this configuration, recommending no CNI") + return Disabled{} +} + +// manifestPath returns the path to the CNI manifest +func manifestPath() string { + return path.Join(vmpath.GuestEphemeralDir, "cni.yaml") +} + +// manifestAsset returns a copyable asset for the CNI manifest +func manifestAsset(b []byte) assets.CopyableFile { + return assets.NewMemoryAssetTarget(b, manifestPath(), "0644") +} + +// kubectlPath returns the path to the kubelet +func kubectlPath(cc config.ClusterConfig) string { + return path.Join(vmpath.GuestPersistentDir, "binaries", cc.KubernetesConfig.KubernetesVersion, "kubectl") +} + +// applyManifest applies a CNI manifest +func applyManifest(cc config.ClusterConfig, r Runner, f assets.CopyableFile) error { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + kubectl := kubectlPath(cc) + glog.Infof("applying CNI manifest using %s ...", kubectl) + + if err := r.Copy(f); err != nil { + return 
errors.Wrapf(err, "copy") + } + + cmd := exec.CommandContext(ctx, "sudo", kubectl, "apply", fmt.Sprintf("--kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "kubeconfig")), "-f", manifestPath()) + if rr, err := r.RunCmd(cmd); err != nil { + return errors.Wrapf(err, "cmd: %s output: %s", rr.Command(), rr.Output()) + } + + return nil +} + +// applyNetConf applies a netconf file across nodes +func applyNetConf(rs []Runner, f assets.CopyableFile) error { + for _, r := range rs { + if err := r.Copy(f); err != nil { + return errors.Wrapf(err, "copy") + } + } + + return nil +} diff --git a/pkg/minikube/cni/custom.go b/pkg/minikube/cni/custom.go new file mode 100644 index 0000000000..a857a55483 --- /dev/null +++ b/pkg/minikube/cni/custom.go @@ -0,0 +1,66 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cni + +import ( + "fmt" + "os" + "path" + + "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube/assets" + "k8s.io/minikube/pkg/minikube/config" +) + +// Custom is a CNI manager that applies a user-specified manifest +type Custom struct { + cc config.ClusterConfig + manifest string +} + +// String returns a string representation of this CNI +func (c Custom) String() string { + return fmt.Sprintf("Custom (%s)", c.manifest) +} + +// NewCustom returns a well-formed Custom CNI manager +func NewCustom(cc config.ClusterConfig, manifest string) (Custom, error) { + _, err := os.Stat(manifest) + if err != nil { + return Custom{}, errors.Wrap(err, "stat") + } + + return Custom{ + cc: cc, + manifest: manifest, + }, nil +} + +// Apply enables the CNI +func (c Custom) Apply(master Runner, nodes []Runner) error { + m, err := assets.NewFileAsset(c.manifest, path.Dir(manifestPath()), path.Base(manifestPath()), "0644") + if err != nil { + return errors.Wrap(err, "manifest") + } + + return applyManifest(c.cc, master, m) +} + +// CIDR returns the default CIDR used by this CNI +func (c Custom) CIDR() string { + return DefaultPodCIDR +} diff --git a/pkg/minikube/cni/disabled.go b/pkg/minikube/cni/disabled.go new file mode 100644 index 0000000000..9eb49d729d --- /dev/null +++ b/pkg/minikube/cni/disabled.go @@ -0,0 +1,51 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package cni + +import ( + "github.com/golang/glog" + "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/driver" +) + +// Disabled is a CNI manager that does nothing +type Disabled struct { + cc config.ClusterConfig +} + +// String returns a string representation +func (c Disabled) String() string { + return "Disabled" +} + +// Apply is a no-op, but warns about configurations that normally require a CNI +func (c Disabled) Apply(master Runner, nodes []Runner) error { + if driver.IsKIC(c.cc.Driver) && c.cc.KubernetesConfig.ContainerRuntime != "docker" { + glog.Warningf("CNI is recommended for %q driver and %q runtime - expect networking issues", c.cc.Driver, c.cc.KubernetesConfig.ContainerRuntime) + } + + if len(c.cc.Nodes) > 1 { + glog.Warningf("CNI is recommended for multi-node clusters - expect networking issues") + } + + return nil +} + +// CIDR returns the default CIDR used by this CNI +func (c Disabled) CIDR() string { + return "" +} diff --git a/pkg/minikube/cni/flannel.go b/pkg/minikube/cni/flannel.go new file mode 100644 index 0000000000..f8a8cf30fa --- /dev/null +++ b/pkg/minikube/cni/flannel.go @@ -0,0 +1,646 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package cni + +import ( + "k8s.io/minikube/pkg/minikube/config" +) + +// From https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml +var flannelTmpl = `--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: psp.flannel.unprivileged + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default + seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default + apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default + apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default +spec: + privileged: false + volumes: + - configMap + - secret + - emptyDir + - hostPath + allowedHostPaths: + - pathPrefix: "/etc/cni/net.d" + - pathPrefix: "/etc/kube-flannel" + - pathPrefix: "/run/flannel" + readOnlyRootFilesystem: false + # Users and groups + runAsUser: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + fsGroup: + rule: RunAsAny + # Privilege Escalation + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + # Capabilities + allowedCapabilities: ['NET_ADMIN'] + defaultAddCapabilities: [] + requiredDropCapabilities: [] + # Host namespaces + hostPID: false + hostIPC: false + hostNetwork: true + hostPorts: + - min: 0 + max: 65535 + # SELinux + seLinux: + # SELinux is unused in CaaSP + rule: 'RunAsAny' +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: flannel +rules: + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: ['psp.flannel.unprivileged'] + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel 
+subjects: +- kind: ServiceAccount + name: flannel + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: flannel + namespace: kube-system +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-flannel-cfg + namespace: kube-system + labels: + tier: node + app: flannel +data: + cni-conf.json: | + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + net-conf.json: | + { + "Network": "10.244.0.0/16", + "Backend": { + "Type": "vxlan" + } + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds-amd64 + namespace: kube-system + labels: + tier: node + app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + - key: kubernetes.io/arch + operator: In + values: + - amd64 + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.12.0-amd64 + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.12.0-amd64 + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: 
metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds-arm64 + namespace: kube-system + labels: + tier: node + app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + - key: kubernetes.io/arch + operator: In + values: + - arm64 + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.12.0-arm64 + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.12.0-arm64 + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: 
/etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds-arm + namespace: kube-system + labels: + tier: node + app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + - key: kubernetes.io/arch + operator: In + values: + - arm + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.12.0-arm + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.12.0-arm + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds-ppc64le + namespace: kube-system + labels: + tier: node + app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: 
node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + - key: kubernetes.io/arch + operator: In + values: + - ppc64le + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.12.0-ppc64le + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.12.0-ppc64le + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds-s390x + namespace: kube-system + labels: + tier: node + app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + - key: kubernetes.io/arch + operator: In + values: + - s390x + hostNetwork: true + 
tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.12.0-s390x + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.12.0-s390x + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg +` + +// Flannel is the Flannel CNI manager +type Flannel struct { + cc config.ClusterConfig +} + +// String returns a string representation of this CNI +func (c Flannel) String() string { + return "Flannel" +} + +// Apply enables the CNI +func (c Flannel) Apply(master Runner, nodes []Runner) error { + return applyManifest(c.cc, master, manifestAsset([]byte(flannelTmpl))) +} + +// CIDR returns the default CIDR used by this CNI +func (c Flannel) CIDR() string { + return DefaultPodCIDR +} diff --git a/pkg/minikube/bootstrapper/kubeadm/default_cni.go b/pkg/minikube/cni/kindnet.go similarity index 68% rename from pkg/minikube/bootstrapper/kubeadm/default_cni.go rename to pkg/minikube/cni/kindnet.go index fb5b0c55f5..fc6c178f87 100644 --- a/pkg/minikube/bootstrapper/kubeadm/default_cni.go +++ b/pkg/minikube/cni/kindnet.go @@ -1,5 
+1,5 @@ /* -Copyright 2018 The Kubernetes Authors All rights reserved. +Copyright 2020 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,39 +14,19 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kubeadm +package cni -import "html/template" +import ( + "bytes" + "text/template" -// defaultCNIConfig is the CNI config which is provisioned when --enable-default-cni -// has been passed to `minikube start`. -// -// The config is being written to /etc/cni/net.d/k8s.conf. -const defaultCNIConfig = ` -{ - "cniVersion": "0.3.0", - "name": "rkt.kubernetes.io", - "type": "bridge", - "bridge": "mybridge", - "mtu": 1460, - "addIf": "true", - "isGateway": true, - "ipMasq": true, - "ipam": { - "type": "host-local", - "subnet": "10.1.0.0/16", - "gateway": "10.1.0.1", - "routes": [ - { - "dst": "0.0.0.0/0" - } - ] - } -} -` + "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube/assets" + "k8s.io/minikube/pkg/minikube/bootstrapper/images" + "k8s.io/minikube/pkg/minikube/config" +) -// kicCNIConfig is the cni plugin needed for kic uses cni plugin created by kind https://github.com/kubernetes-sigs/kind/blob/03a4b519067dc308308cce735065c47a6fda1583/pkg/build/node/cni.go -var kicCNIConfig = template.Must(template.New("kubeletServiceTemplate").Parse(`--- +var kindNetManifest = template.Must(template.New("kindnet").Parse(`--- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -125,7 +105,7 @@ spec: fieldRef: fieldPath: status.podIP - name: POD_SUBNET - value: 10.244.0.0/16 + value: {{.PodCIDR}} volumeMounts: - name: cni-cfg mountPath: /etc/cni/net.d @@ -160,3 +140,42 @@ spec: --- `)) + +// KindNet is the KindNet CNI manager +type KindNet struct { + cc config.ClusterConfig +} + +// String returns a string representation of this CNI +func (c KindNet) String() string { + 
return "CNI" +} + +// manifest returns a Kubernetes manifest for a CNI +func (c KindNet) manifest() (assets.CopyableFile, error) { + input := &tmplInput{ + DefaultRoute: "0.0.0.0/0", // assumes IPv4 + PodCIDR: DefaultPodCIDR, + ImageName: images.KindNet(c.cc.KubernetesConfig.ImageRepository), + } + + b := bytes.Buffer{} + if err := kindNetManifest.Execute(&b, input); err != nil { + return nil, err + } + return manifestAsset(b.Bytes()), nil +} + +// Apply enables the CNI +func (c KindNet) Apply(master Runner, nodes []Runner) error { + m, err := c.manifest() + if err != nil { + return errors.Wrap(err, "manifest") + } + return applyManifest(c.cc, master, m) +} + +// CIDR returns the default CIDR used by this CNI +func (c KindNet) CIDR() string { + return DefaultPodCIDR +} diff --git a/pkg/minikube/config/config.go b/pkg/minikube/config/config.go index eb7c9ef004..43d19792ba 100644 --- a/pkg/minikube/config/config.go +++ b/pkg/minikube/config/config.go @@ -202,18 +202,6 @@ func (c *simpleConfigLoader) WriteConfigToFile(profileName string, cc *ClusterCo return ioutil.WriteFile(path, contents, 0644) } -// MultiNodeCNIConfig add default CNI config needed for multinode clusters and saves off the config -func MultiNodeCNIConfig(cc *ClusterConfig) error { - if cc.KubernetesConfig.ExtraOptions.Get("pod-network-cidr", "kubeadm") == "" { - cc.KubernetesConfig.NetworkPlugin = "cni" - if err := cc.KubernetesConfig.ExtraOptions.Set(fmt.Sprintf("kubeadm.pod-network-cidr=%s", DefaultPodCIDR)); err != nil { - return err - } - return SaveProfile(cc.Name, cc) - } - return nil -} - // MultiNode returns true if the cluster has multiple nodes or if the request is asking for multinode func MultiNode(cc ClusterConfig) bool { if len(cc.Nodes) > 1 { diff --git a/pkg/minikube/config/profile.go b/pkg/minikube/config/profile.go index 1b6b759f52..80e52fc75c 100644 --- a/pkg/minikube/config/profile.go +++ b/pkg/minikube/config/profile.go @@ -137,12 +137,6 @@ func SaveNode(cfg *ClusterConfig, node 
*Node) error { cfg.Nodes = append(cfg.Nodes, *node) } - if MultiNode(*cfg) { - if err := MultiNodeCNIConfig(cfg); err != nil { - return err - } - } - return SaveProfile(viper.GetString(ProfileName), cfg) } diff --git a/pkg/minikube/config/types.go b/pkg/minikube/config/types.go index f9ebbf5273..6adfb82124 100644 --- a/pkg/minikube/config/types.go +++ b/pkg/minikube/config/types.go @@ -22,14 +22,6 @@ import ( "github.com/blang/semver" ) -const ( - // DefaultNetwork is the Docker default bridge network named "bridge" - // (https://docs.docker.com/network/bridge/#use-the-default-bridge-network) - DefaultNetwork = "bridge" - // DefaultPodCIDR is The CIDR to be used for pods inside the node. - DefaultPodCIDR = "10.244.0.0/16" -) - // Profile represents a minikube profile type Profile struct { Name string @@ -96,7 +88,9 @@ type KubernetesConfig struct { ExtraOptions ExtraOptionSlice ShouldLoadCachedImages bool - EnableDefaultCNI bool + + EnableDefaultCNI bool // deprecated in preference to CNI + CNI string // CNI to use // We need to keep these in the short term for backwards compatibility NodeIP string diff --git a/pkg/minikube/cruntime/containerd.go b/pkg/minikube/cruntime/containerd.go index 9dabf8a466..94bdc426af 100644 --- a/pkg/minikube/cruntime/containerd.go +++ b/pkg/minikube/cruntime/containerd.go @@ -157,11 +157,6 @@ func (r *Containerd) SocketPath() string { return "/run/containerd/containerd.sock" } -// DefaultCNI returns whether to use CNI networking by default -func (r *Containerd) DefaultCNI() bool { - return true -} - // Active returns if containerd is active on the host func (r *Containerd) Active() bool { return r.Init.Active("containerd") diff --git a/pkg/minikube/cruntime/crio.go b/pkg/minikube/cruntime/crio.go index d4334dd4f7..f424ea4104 100644 --- a/pkg/minikube/cruntime/crio.go +++ b/pkg/minikube/cruntime/crio.go @@ -89,11 +89,6 @@ func (r *CRIO) SocketPath() string { return "/var/run/crio/crio.sock" } -// DefaultCNI returns whether to use CNI 
networking by default -func (r *CRIO) DefaultCNI() bool { - return true -} - // Available returns an error if it is not possible to use this runtime on a host func (r *CRIO) Available() error { c := exec.Command("which", "crio") diff --git a/pkg/minikube/cruntime/cruntime.go b/pkg/minikube/cruntime/cruntime.go index 0cc5046f0d..c5df2d1e8a 100644 --- a/pkg/minikube/cruntime/cruntime.go +++ b/pkg/minikube/cruntime/cruntime.go @@ -79,8 +79,6 @@ type Manager interface { KubeletOptions() map[string]string // SocketPath returns the path to the socket file for a given runtime SocketPath() string - // DefaultCNI returns whether to use CNI networking by default - DefaultCNI() bool // Load an image idempotently into the runtime on a host LoadImage(string) error diff --git a/pkg/minikube/cruntime/docker.go b/pkg/minikube/cruntime/docker.go index dcb8beff92..5570c2a8c0 100644 --- a/pkg/minikube/cruntime/docker.go +++ b/pkg/minikube/cruntime/docker.go @@ -86,11 +86,6 @@ func (r *Docker) SocketPath() string { return r.Socket } -// DefaultCNI returns whether to use CNI networking by default -func (r *Docker) DefaultCNI() bool { - return false -} - // Available returns an error if it is not possible to use this runtime on a host func (r *Docker) Available() error { _, err := exec.LookPath("docker") diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index f9ee03f918..9809727180 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -169,9 +169,6 @@ func FlagDefaults(name string) FlagHints { fh := FlagHints{} if name != None { fh.CacheImages = true - if name == Docker { - fh.ExtraOptions = append(fh.ExtraOptions, fmt.Sprintf("kubeadm.pod-network-cidr=%s", config.DefaultPodCIDR)) - } return fh } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 3f1afe6202..a22f3c74e0 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -39,6 +39,7 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper" 
"k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/cluster" + "k8s.io/minikube/pkg/minikube/cni" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" @@ -153,17 +154,18 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { prepareNone() } + glog.Infof("Will wait %s for node ...", waitTimeout) if err := bs.WaitForNode(*starter.Cfg, *starter.Node, viper.GetDuration(waitTimeout)); err != nil { - return nil, errors.Wrap(err, "Wait failed") + return nil, errors.Wrapf(err, "wait %s for node", viper.GetDuration(waitTimeout)) } } else { if err := bs.UpdateNode(*starter.Cfg, *starter.Node, cr); err != nil { - return nil, errors.Wrap(err, "Updating node") + return nil, errors.Wrap(err, "update node") } // Make sure to use the command runner for the control plane to generate the join token - cpBs, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper)) + cpBs, cpr, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper)) if err != nil { return nil, errors.Wrap(err, "getting control plane bootstrapper") } @@ -176,8 +178,18 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { if err = bs.JoinCluster(*starter.Cfg, *starter.Node, joinCmd); err != nil { return nil, errors.Wrap(err, "joining cluster") } + + cnm, err := cni.New(*starter.Cfg) + if err != nil { + return nil, errors.Wrap(err, "cni") + } + + if err := cnm.Apply(cpr, []cni.Runner{cpr, starter.Runner}); err != nil { + return nil, errors.Wrap(err, "cni apply") + } } + glog.Infof("waiting for startup goroutines ...") wg.Wait() // Write enabled addons to the config before completion diff --git a/pkg/minikube/out/style.go b/pkg/minikube/out/style.go index 2d2a64762b..24f839fe23 100644 --- a/pkg/minikube/out/style.go +++ b/pkg/minikube/out/style.go @@ -122,6 +122,7 @@ var 
styles = map[StyleEnum]style{ Unmount: {Prefix: "🔥 "}, VerifyingNoLine: {Prefix: "🤔 ", OmitNewline: true}, Verifying: {Prefix: "🤔 "}, + CNI: {Prefix: "🔗 "}, } // Add a prefix to a string diff --git a/pkg/minikube/out/style_enum.go b/pkg/minikube/out/style_enum.go index 9fe6362e2d..e6604eed9d 100644 --- a/pkg/minikube/out/style_enum.go +++ b/pkg/minikube/out/style_enum.go @@ -93,4 +93,5 @@ const ( WaitingPods Warning Workaround + CNI ) diff --git a/site/content/en/docs/commands/start.md b/site/content/en/docs/commands/start.md index 1a8a1580fb..ce8780bb1c 100644 --- a/site/content/en/docs/commands/start.md +++ b/site/content/en/docs/commands/start.md @@ -29,6 +29,7 @@ minikube start [flags] --auto-update-drivers If set, automatically updates drivers to the latest version. Defaults to true. (default true) --base-image string The base image to use for docker/podman drivers. Intended for local development. (default "gcr.io/k8s-minikube/kicbase:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438") --cache-images If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none. (default true) + --cni string CNI plug-in to use. Valid options: auto, calico, custom, flannel, kindnet (default: auto) --container-runtime string The container runtime to be used (docker, crio, containerd). (default "docker") --cpus int Number of CPUs allocated to Kubernetes. (default 2) --cri-socket string The cri socket path to be used. @@ -43,7 +44,7 @@ minikube start [flags] --driver string Used to specify the driver to run Kubernetes in. The list of available drivers depends on operating system. --dry-run dry-run mode. Validates configuration, but does not mutate system state --embed-certs if true, will embed the certs in kubeconfig. - --enable-default-cni Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with "--network-plugin=cni". 
+ --enable-default-cni DEPRECATED: Replaced by --cni=custom --extra-config ExtraOption A set of key=value pairs that describe configuration that may be passed to different components. The key should be '.' separated, and the first part before the dot is the component to apply the configuration to. Valid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, proxy, scheduler @@ -77,7 +78,7 @@ minikube start [flags] --mount-string string The argument to pass the minikube mount command on start. --nat-nic-type string NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only) (default "virtio") --native-ssh Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'. (default true) - --network-plugin string The name of the network plugin. + --network-plugin string Kubelet network plug-in to use (default: auto) --nfs-share strings Local folders to share with Guest via NFS mounts (hyperkit driver only) --nfs-shares-root string Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only) (default "/nfsshares") --no-vtx-check Disable checking for the availability of hardware virtualization before the vm is started (virtualbox driver only) diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index 4508a56bbf..06874156f5 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -107,7 +107,6 @@ func TestFunctional(t *testing.T) { {"ComponentHealth", validateComponentHealth}, {"ConfigCmd", validateConfigCmd}, {"DashboardCmd", validateDashboardCmd}, - {"DNS", validateDNS}, {"DryRun", validateDryRun}, {"StatusCmd", validateStatusCmd}, {"LogsCmd", validateLogsCmd}, @@ -419,36 +418,6 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) { 
} } -// validateDNS asserts that all Kubernetes DNS is healthy -func validateDNS(ctx context.Context, t *testing.T, profile string) { - defer PostMortemLogs(t, profile) - - rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "busybox.yaml"))) - if err != nil { - t.Fatalf("failed to kubectl replace busybox : args %q: %v", rr.Command(), err) - } - - names, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", Minutes(4)) - if err != nil { - t.Fatalf("failed waiting for busybox pod : %v", err) - } - - nslookup := func() error { - rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", names[0], "nslookup", "kubernetes.default")) - return err - } - - // If the coredns process was stable, this retry wouldn't be necessary. - if err = retry.Expo(nslookup, 1*time.Second, Minutes(1)); err != nil { - t.Errorf("failed to do nslookup on kubernetes.default: %v", err) - } - - want := []byte("10.96.0.1") - if !bytes.Contains(rr.Stdout.Bytes(), want) { - t.Errorf("failed nslookup: got=%q, want=*%q*", rr.Stdout.Bytes(), want) - } -} - // validateDryRun asserts that the dry-run mode quickly exits with the right code func validateDryRun(ctx context.Context, t *testing.T, profile string) { // dry-run mode should always be able to finish quickly (<5s) diff --git a/test/integration/helpers_test.go b/test/integration/helpers_test.go index cc3036a7e9..75b6f9d22e 100644 --- a/test/integration/helpers_test.go +++ b/test/integration/helpers_test.go @@ -167,6 +167,7 @@ func (ss *StartSession) Stop(t *testing.T) { func Cleanup(t *testing.T, profile string, cancel context.CancelFunc) { // No helper because it makes the call log confusing. 
if *cleanup { + t.Logf("Cleaning up %q profile ...", profile) _, err := Run(t, exec.Command(Target(), "delete", "-p", profile)) if err != nil { t.Logf("failed cleanup: %v", err) diff --git a/test/integration/net_test.go b/test/integration/net_test.go new file mode 100644 index 0000000000..5db1409b53 --- /dev/null +++ b/test/integration/net_test.go @@ -0,0 +1,186 @@ +// +build integration + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "bytes" + "context" + "fmt" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + "k8s.io/minikube/pkg/kapi" + "k8s.io/minikube/pkg/util/retry" +) + +func TestNetworkPlugins(t *testing.T) { + MaybeParallel(t) + + t.Run("group", func(t *testing.T) { + tests := []struct { + name string + args []string + kubeletPlugin string + podLabel string + hairpin bool + }{ + {"auto", []string{}, "", "", false}, + {"kubenet", []string{"--network-plugin=kubenet"}, "kubenet", "", true}, + {"bridge", []string{"--cni=bridge"}, "cni", "", true}, + {"enable-default-cni", []string{"--enable-default-cni=true"}, "cni", "", true}, + {"flannel", []string{"--cni=flannel"}, "cni", "app=flannel", true}, + {"kindnet", []string{"--cni=kindnet"}, "cni", "app=kindnet", true}, + {"false", []string{"--cni=false"}, "", "", false}, + {"custom-weave", []string{fmt.Sprintf("--cni=%s", filepath.Join(*testdataDir, "weavenet.yaml"))}, "cni", "", true}, + } + + for _, tc := range 
tests { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + start := time.Now() + MaybeParallel(t) + profile := UniqueProfileName(tc.name) + + ctx, cancel := context.WithTimeout(context.Background(), Minutes(30)) + defer Cleanup(t, profile, cancel) + + startArgs := append([]string{"start", "-p", profile, "--memory=1800", "--alsologtostderr", "--wait=true", "--wait-timeout=20m"}, tc.args...) + startArgs = append(startArgs, StartArgs()...) + + t.Run("Start", func(t *testing.T) { + _, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) + if err != nil { + t.Fatalf("failed start: %v", err) + } + }) + + if !t.Failed() && tc.podLabel != "" { + t.Run("ControllerPod", func(t *testing.T) { + if _, err := PodWait(ctx, t, profile, "kube-system", tc.podLabel, Minutes(8)); err != nil { + t.Fatalf("failed waiting for %s labeled pod: %v", tc.podLabel, err) + } + }) + } + if !t.Failed() { + t.Run("KubeletFlags", func(t *testing.T) { + rr, err := Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "pgrep -a kubelet")) + if err != nil { + t.Fatalf("ssh failed: %v", err) + } + out := rr.Stdout.String() + + if tc.kubeletPlugin == "" { + if strings.Contains(out, "--network-plugin") { + t.Errorf("expected no network plug-in, got %s", out) + } + } else { + if !strings.Contains(out, fmt.Sprintf("--network-plugin=%s", tc.kubeletPlugin)) { + t.Errorf("expected --network-plugin=%s, got %s", tc.kubeletPlugin, out) + } + } + + }) + } + + if !t.Failed() { + t.Run("NetCatPod", func(t *testing.T) { + _, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "netcat-deployment.yaml"))) + if err != nil { + t.Errorf("failed to apply netcat manifest: %v", err) + } + + client, err := kapi.Client(profile) + if err != nil { + t.Fatalf("failed to get Kubernetes client for %s: %v", profile, err) + } + + if err := kapi.WaitForDeploymentToStabilize(client, "default", "netcat", Minutes(12)); err != nil { + 
t.Errorf("failed waiting for netcat deployment to stabilize: %v", err) + } + + if _, err := PodWait(ctx, t, profile, "default", "app=netcat", Minutes(12)); err != nil { + t.Fatalf("failed waiting for netcat pod: %v", err) + } + + }) + } + + if !t.Failed() { + t.Run("DNS", func(t *testing.T) { + var rr *RunResult + var err error + + nslookup := func() error { + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", "deployment/netcat", "--", "nslookup", "kubernetes.default")) + return err + } + + // If the coredns process was stable, this retry wouldn't be necessary. + if err := retry.Expo(nslookup, 1*time.Second, Minutes(1)); err != nil { + t.Errorf("failed to do nslookup on kubernetes.default: %v", err) + } + + want := []byte("10.96.0.1") + if !bytes.Contains(rr.Stdout.Bytes(), want) { + t.Errorf("failed nslookup: got=%q, want=*%q*", rr.Stdout.Bytes(), want) + } + }) + } + + if !t.Failed() { + t.Run("Localhost", func(t *testing.T) { + tryLocal := func() error { + _, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", "deployment/netcat", "--", "/bin/sh", "-c", "nc -w 5 -i 5 -z localhost 8080")) + return err + } + + if err := retry.Expo(tryLocal, 1*time.Second, Seconds(30)); err != nil { + t.Errorf("failed to connect via localhost: %v", err) + } + }) + } + + if !t.Failed() { + t.Run("HairPin", func(t *testing.T) { + tryHairPin := func() error { + _, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", "deployment/netcat", "--", "/bin/sh", "-c", "nc -w 5 -i 5 -z netcat 8080")) + return err + } + + if tc.hairpin { + if err := retry.Expo(tryHairPin, 1*time.Second, Seconds(30)); err != nil { + t.Errorf("failed to connect via pod host: %v", err) + } + } else { + if tryHairPin() == nil { + t.Fatalf("hairpin connection unexpectedly succeeded - misconfigured test?") + } + } + }) + } + + t.Logf("%q test finished in %s, failed=%v", tc.name, time.Since(start), t.Failed()) + }) + } + }) +} diff 
--git a/test/integration/testdata/netcat-deployment-nomaster.yaml b/test/integration/testdata/netcat-deployment-nomaster.yaml new file mode 100644 index 0000000000..7c16d83c43 --- /dev/null +++ b/test/integration/testdata/netcat-deployment-nomaster.yaml @@ -0,0 +1,31 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: netcat + labels: + app: netcat +spec: + replicas: 1 + selector: + matchLabels: + app: netcat + template: + metadata: + labels: + app: netcat + spec: + containers: + # dnsutils is easier to debug DNS issues with than the standard busybox image + - name: dnsutils + image: gcr.io/kubernetes-e2e-test-images/dnsutils + command: + ["/bin/sh", "-c", "while true; do echo hello | nc -l -p 8080; done"] + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: NotIn + values: + - "" diff --git a/test/integration/testdata/netcat-deployment.yaml b/test/integration/testdata/netcat-deployment.yaml new file mode 100644 index 0000000000..6b0d85a6c4 --- /dev/null +++ b/test/integration/testdata/netcat-deployment.yaml @@ -0,0 +1,32 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: netcat + labels: + app: netcat +spec: + replicas: 1 + selector: + matchLabels: + app: netcat + template: + metadata: + labels: + app: netcat + spec: + containers: + # dnsutils is easier to debug DNS issues with than the standard busybox image + - name: dnsutils + image: gcr.io/kubernetes-e2e-test-images/dnsutils:1.3 + command: + ["/bin/sh", "-c", "while true; do echo hello | nc -l -p 8080; done"] +--- +apiVersion: v1 +kind: Service +metadata: + name: netcat +spec: + ports: + - port: 8080 + selector: + app: netcat diff --git a/test/integration/testdata/weavenet.yaml b/test/integration/testdata/weavenet.yaml new file mode 100644 index 0000000000..8918cf2a95 --- /dev/null +++ b/test/integration/testdata/weavenet.yaml @@ -0,0 +1,255 @@ +apiVersion: v1 +kind: List 
+items: + - apiVersion: v1 + kind: ServiceAccount + metadata: + name: weave-net + annotations: + cloud.weave.works/launcher-info: |- + { + "original-request": { + "url": "/k8s/v1.16/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiIxOCIsIEdpdFZlcnNpb246InYxLjE4LjAiLCBHaXRDb21taXQ6IjllOTkxNDE1Mzg2ZTRjZjE1NWEyNGIxZGExNWJlY2FhMzkwNDM4ZDgiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDIwLTAzLTI2VDA2OjE2OjE1WiIsIEdvVmVyc2lvbjoiZ28xLjE0IiwgQ29tcGlsZXI6ImdjIiwgUGxhdGZvcm06ImRhcndpbi9hbWQ2NCJ9ClNlcnZlciBWZXJzaW9uOiB2ZXJzaW9uLkluZm97TWFqb3I6IjEiLCBNaW5vcjoiMTgiLCBHaXRWZXJzaW9uOiJ2MS4xOC4zIiwgR2l0Q29tbWl0OiIyZTc5OTZlM2UyNzEyNjg0YmM3M2YwZGVjMDIwMGQ2NGVlYzdmZTQwIiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAyMC0wNS0yMFQxMjo0MzozNFoiLCBHb1ZlcnNpb246ImdvMS4xMy45IiwgQ29tcGlsZXI6ImdjIiwgUGxhdGZvcm06ImxpbnV4L2FtZDY0In0K", + "date": "Tue Jun 23 2020 02:18:50 GMT+0000 (UTC)" + }, + "email-address": "support@weave.works" + } + labels: + name: weave-net + namespace: kube-system + - apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: weave-net + annotations: + cloud.weave.works/launcher-info: |- + { + "original-request": { + "url": "/k8s/v1.16/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiIxOCIsIEdpdFZlcnNpb246InYxLjE4LjAiLCBHaXRDb21taXQ6IjllOTkxNDE1Mzg2ZTRjZjE1NWEyNGIxZGExNWJlY2FhMzkwNDM4ZDgiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDIwLTAzLTI2VDA2OjE2OjE1WiIsIEdvVmVyc2lvbjoiZ28xLjE0IiwgQ29tcGlsZXI6ImdjIiwgUGxhdGZvcm06ImRhcndpbi9hbWQ2NCJ9ClNlcnZlciBWZXJzaW9uOiB2ZXJzaW9uLkluZm97TWFqb3I6IjEiLCBNaW5vcjoiMTgiLCBHaXRWZXJzaW9uOiJ2MS4xOC4zIiwgR2l0Q29tbWl0OiIyZTc5OTZlM2UyNzEyNjg0YmM3M2YwZGVjMDIwMGQ2NGVlYzdmZTQwIiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAyMC0wNS0yMFQxMjo0MzozNFoiLCBHb1ZlcnNpb246ImdvMS4xMy45IiwgQ29tcGlsZXI6ImdjIiwgUGxhdGZvcm06ImxpbnV4L2FtZDY0In0K", + "date": "Tue Jun 23 2020 02:18:50 GMT+0000 (UTC)" + }, + "email-address": "support@weave.works" + } + labels: + 
name: weave-net + rules: + - apiGroups: + - '' + resources: + - pods + - namespaces + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - nodes/status + verbs: + - patch + - update + - apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: weave-net + annotations: + cloud.weave.works/launcher-info: |- + { + "original-request": { + "url": "/k8s/v1.16/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiIxOCIsIEdpdFZlcnNpb246InYxLjE4LjAiLCBHaXRDb21taXQ6IjllOTkxNDE1Mzg2ZTRjZjE1NWEyNGIxZGExNWJlY2FhMzkwNDM4ZDgiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDIwLTAzLTI2VDA2OjE2OjE1WiIsIEdvVmVyc2lvbjoiZ28xLjE0IiwgQ29tcGlsZXI6ImdjIiwgUGxhdGZvcm06ImRhcndpbi9hbWQ2NCJ9ClNlcnZlciBWZXJzaW9uOiB2ZXJzaW9uLkluZm97TWFqb3I6IjEiLCBNaW5vcjoiMTgiLCBHaXRWZXJzaW9uOiJ2MS4xOC4zIiwgR2l0Q29tbWl0OiIyZTc5OTZlM2UyNzEyNjg0YmM3M2YwZGVjMDIwMGQ2NGVlYzdmZTQwIiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAyMC0wNS0yMFQxMjo0MzozNFoiLCBHb1ZlcnNpb246ImdvMS4xMy45IiwgQ29tcGlsZXI6ImdjIiwgUGxhdGZvcm06ImxpbnV4L2FtZDY0In0K", + "date": "Tue Jun 23 2020 02:18:50 GMT+0000 (UTC)" + }, + "email-address": "support@weave.works" + } + labels: + name: weave-net + roleRef: + kind: ClusterRole + name: weave-net + apiGroup: rbac.authorization.k8s.io + subjects: + - kind: ServiceAccount + name: weave-net + namespace: kube-system + - apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: weave-net + annotations: + cloud.weave.works/launcher-info: |- + { + "original-request": { + "url": 
"/k8s/v1.16/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiIxOCIsIEdpdFZlcnNpb246InYxLjE4LjAiLCBHaXRDb21taXQ6IjllOTkxNDE1Mzg2ZTRjZjE1NWEyNGIxZGExNWJlY2FhMzkwNDM4ZDgiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDIwLTAzLTI2VDA2OjE2OjE1WiIsIEdvVmVyc2lvbjoiZ28xLjE0IiwgQ29tcGlsZXI6ImdjIiwgUGxhdGZvcm06ImRhcndpbi9hbWQ2NCJ9ClNlcnZlciBWZXJzaW9uOiB2ZXJzaW9uLkluZm97TWFqb3I6IjEiLCBNaW5vcjoiMTgiLCBHaXRWZXJzaW9uOiJ2MS4xOC4zIiwgR2l0Q29tbWl0OiIyZTc5OTZlM2UyNzEyNjg0YmM3M2YwZGVjMDIwMGQ2NGVlYzdmZTQwIiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAyMC0wNS0yMFQxMjo0MzozNFoiLCBHb1ZlcnNpb246ImdvMS4xMy45IiwgQ29tcGlsZXI6ImdjIiwgUGxhdGZvcm06ImxpbnV4L2FtZDY0In0K", + "date": "Tue Jun 23 2020 02:18:50 GMT+0000 (UTC)" + }, + "email-address": "support@weave.works" + } + labels: + name: weave-net + namespace: kube-system + rules: + - apiGroups: + - '' + resourceNames: + - weave-net + resources: + - configmaps + verbs: + - get + - update + - apiGroups: + - '' + resources: + - configmaps + verbs: + - create + - apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: weave-net + annotations: + cloud.weave.works/launcher-info: |- + { + "original-request": { + "url": "/k8s/v1.16/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiIxOCIsIEdpdFZlcnNpb246InYxLjE4LjAiLCBHaXRDb21taXQ6IjllOTkxNDE1Mzg2ZTRjZjE1NWEyNGIxZGExNWJlY2FhMzkwNDM4ZDgiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDIwLTAzLTI2VDA2OjE2OjE1WiIsIEdvVmVyc2lvbjoiZ28xLjE0IiwgQ29tcGlsZXI6ImdjIiwgUGxhdGZvcm06ImRhcndpbi9hbWQ2NCJ9ClNlcnZlciBWZXJzaW9uOiB2ZXJzaW9uLkluZm97TWFqb3I6IjEiLCBNaW5vcjoiMTgiLCBHaXRWZXJzaW9uOiJ2MS4xOC4zIiwgR2l0Q29tbWl0OiIyZTc5OTZlM2UyNzEyNjg0YmM3M2YwZGVjMDIwMGQ2NGVlYzdmZTQwIiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAyMC0wNS0yMFQxMjo0MzozNFoiLCBHb1ZlcnNpb246ImdvMS4xMy45IiwgQ29tcGlsZXI6ImdjIiwgUGxhdGZvcm06ImxpbnV4L2FtZDY0In0K", + "date": "Tue Jun 23 2020 02:18:50 GMT+0000 (UTC)" + }, + "email-address": 
"support@weave.works" + } + labels: + name: weave-net + namespace: kube-system + roleRef: + kind: Role + name: weave-net + apiGroup: rbac.authorization.k8s.io + subjects: + - kind: ServiceAccount + name: weave-net + namespace: kube-system + - apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: weave-net + annotations: + cloud.weave.works/launcher-info: |- + { + "original-request": { + "url": "/k8s/v1.16/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiIxOCIsIEdpdFZlcnNpb246InYxLjE4LjAiLCBHaXRDb21taXQ6IjllOTkxNDE1Mzg2ZTRjZjE1NWEyNGIxZGExNWJlY2FhMzkwNDM4ZDgiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDIwLTAzLTI2VDA2OjE2OjE1WiIsIEdvVmVyc2lvbjoiZ28xLjE0IiwgQ29tcGlsZXI6ImdjIiwgUGxhdGZvcm06ImRhcndpbi9hbWQ2NCJ9ClNlcnZlciBWZXJzaW9uOiB2ZXJzaW9uLkluZm97TWFqb3I6IjEiLCBNaW5vcjoiMTgiLCBHaXRWZXJzaW9uOiJ2MS4xOC4zIiwgR2l0Q29tbWl0OiIyZTc5OTZlM2UyNzEyNjg0YmM3M2YwZGVjMDIwMGQ2NGVlYzdmZTQwIiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAyMC0wNS0yMFQxMjo0MzozNFoiLCBHb1ZlcnNpb246ImdvMS4xMy45IiwgQ29tcGlsZXI6ImdjIiwgUGxhdGZvcm06ImxpbnV4L2FtZDY0In0K", + "date": "Tue Jun 23 2020 02:18:50 GMT+0000 (UTC)" + }, + "email-address": "support@weave.works" + } + labels: + name: weave-net + namespace: kube-system + spec: + minReadySeconds: 5 + selector: + matchLabels: + name: weave-net + template: + metadata: + labels: + name: weave-net + spec: + containers: + - name: weave + command: + - /home/weave/launch.sh + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: 'docker.io/weaveworks/weave-kube:2.6.5' + readinessProbe: + httpGet: + host: 127.0.0.1 + path: /status + port: 6784 + resources: + requests: + cpu: 10m + securityContext: + privileged: true + volumeMounts: + - name: weavedb + mountPath: /weavedb + - name: cni-bin + mountPath: /host/opt + - name: cni-bin2 + mountPath: /host/home + - name: cni-conf + mountPath: /host/etc + - name: dbus + mountPath: /host/var/lib/dbus + - name: lib-modules + 
mountPath: /lib/modules + - name: xtables-lock + mountPath: /run/xtables.lock + - name: weave-npc + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: 'docker.io/weaveworks/weave-npc:2.6.5' + resources: + requests: + cpu: 10m + securityContext: + privileged: true + volumeMounts: + - name: xtables-lock + mountPath: /run/xtables.lock + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: true + hostPID: true + priorityClassName: system-node-critical + restartPolicy: Always + securityContext: + seLinuxOptions: {} + serviceAccountName: weave-net + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: weavedb + hostPath: + path: /var/lib/weave + - name: cni-bin + hostPath: + path: /opt + - name: cni-bin2 + hostPath: + path: /home + - name: cni-conf + hostPath: + path: /etc + - name: dbus + hostPath: + path: /var/lib/dbus + - name: lib-modules + hostPath: + path: /lib/modules + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + updateStrategy: + type: RollingUpdate From 65b63a6e67addb96768006913300d69ecd3b38cd Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Tue, 23 Jun 2020 20:12:19 -0700 Subject: [PATCH 02/12] Make test timeouts extremely pessimistic --- test/integration/net_test.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/test/integration/net_test.go b/test/integration/net_test.go index 5db1409b53..71c2062b78 100644 --- a/test/integration/net_test.go +++ b/test/integration/net_test.go @@ -61,10 +61,10 @@ func TestNetworkPlugins(t *testing.T) { MaybeParallel(t) profile := UniqueProfileName(tc.name) - ctx, cancel := context.WithTimeout(context.Background(), Minutes(30)) + ctx, cancel := context.WithTimeout(context.Background(), Minutes(40)) defer Cleanup(t, profile, cancel) - startArgs := append([]string{"start", "-p", profile, "--memory=1800", "--alsologtostderr", "--wait=true", 
"--wait-timeout=20m"}, tc.args...) + startArgs := append([]string{"start", "-p", profile, "--memory=1800", "--alsologtostderr", "--wait=true", "--wait-timeout=25m"}, tc.args...) startArgs = append(startArgs, StartArgs()...) t.Run("Start", func(t *testing.T) { @@ -76,7 +76,7 @@ func TestNetworkPlugins(t *testing.T) { if !t.Failed() && tc.podLabel != "" { t.Run("ControllerPod", func(t *testing.T) { - if _, err := PodWait(ctx, t, profile, "kube-system", tc.podLabel, Minutes(8)); err != nil { + if _, err := PodWait(ctx, t, profile, "kube-system", tc.podLabel, Minutes(10)); err != nil { t.Fatalf("failed waiting for %s labeled pod: %v", tc.podLabel, err) } }) @@ -114,11 +114,11 @@ func TestNetworkPlugins(t *testing.T) { t.Fatalf("failed to get Kubernetes client for %s: %v", profile, err) } - if err := kapi.WaitForDeploymentToStabilize(client, "default", "netcat", Minutes(12)); err != nil { + if err := kapi.WaitForDeploymentToStabilize(client, "default", "netcat", Minutes(15)); err != nil { t.Errorf("failed waiting for netcat deployment to stabilize: %v", err) } - if _, err := PodWait(ctx, t, profile, "default", "app=netcat", Minutes(12)); err != nil { + if _, err := PodWait(ctx, t, profile, "default", "app=netcat", Minutes(15)); err != nil { t.Fatalf("failed waiting for netcat pod: %v", err) } @@ -136,7 +136,7 @@ func TestNetworkPlugins(t *testing.T) { } // If the coredns process was stable, this retry wouldn't be necessary. 
- if err := retry.Expo(nslookup, 1*time.Second, Minutes(1)); err != nil { + if err := retry.Expo(nslookup, 1*time.Second, Minutes(2)); err != nil { t.Errorf("failed to do nslookup on kubernetes.default: %v", err) } @@ -154,7 +154,7 @@ func TestNetworkPlugins(t *testing.T) { return err } - if err := retry.Expo(tryLocal, 1*time.Second, Seconds(30)); err != nil { + if err := retry.Expo(tryLocal, 1*time.Second, Seconds(60)); err != nil { t.Errorf("failed to connect via localhost: %v", err) } }) @@ -168,7 +168,7 @@ func TestNetworkPlugins(t *testing.T) { } if tc.hairpin { - if err := retry.Expo(tryHairPin, 1*time.Second, Seconds(30)); err != nil { + if err := retry.Expo(tryHairPin, 1*time.Second, Seconds(60)); err != nil { t.Errorf("failed to connect via pod host: %v", err) } } else { From 5680774f42a6095c9a91e51a66987aed10340c00 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Tue, 23 Jun 2020 20:18:59 -0700 Subject: [PATCH 03/12] Remove unused hack scripts --- .../cloudshell_integration_tests_none.sh | 44 ------------------- hack/jenkins/linux_conformance_tests_kvm.sh | 39 ---------------- 2 files changed, 83 deletions(-) delete mode 100755 hack/jenkins/cloudshell_integration_tests_none.sh delete mode 100755 hack/jenkins/linux_conformance_tests_kvm.sh diff --git a/hack/jenkins/cloudshell_integration_tests_none.sh b/hack/jenkins/cloudshell_integration_tests_none.sh deleted file mode 100755 index e888c9bfbc..0000000000 --- a/hack/jenkins/cloudshell_integration_tests_none.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -# Copyright 2016 The Kubernetes Authors All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# This script runs the integration tests on a Linux machine for the none Driver - -# The script expects the following env variables: -# MINIKUBE_LOCATION: GIT_COMMIT from upstream build. -# COMMIT: Actual commit ID from upstream build -# EXTRA_BUILD_ARGS (optional): Extra args to be passed into the minikube integrations tests -# access_token: The Github API access token. Injected by the Jenkins credential provider. - - -set -e - -OS_ARCH="linux-amd64" -VM_DRIVER="none" -JOB_NAME="none_cloudshell" - - - -SUDO_PREFIX="sudo -E " -export KUBECONFIG="/root/.kube/config" - -gcloud alpha cloud-shell ssh --boosted "uptime" -gcloud alpha cloud-shell scp --boosted "" - -mkdir -p cron && gsutil -m rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES" -sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP" - -source ./common.sh diff --git a/hack/jenkins/linux_conformance_tests_kvm.sh b/hack/jenkins/linux_conformance_tests_kvm.sh deleted file mode 100755 index 65038e0cdb..0000000000 --- a/hack/jenkins/linux_conformance_tests_kvm.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -# Copyright 2016 The Kubernetes Authors All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# This script runs the integration tests on a Linux machine for the KVM Driver - -# The script expects the following env variables: -# MINIKUBE_LOCATION: GIT_COMMIT from upstream build. -# COMMIT: Actual commit ID from upstream build -# EXTRA_BUILD_ARGS (optional): Extra args to be passed into the minikube integrations tests -# access_token: The Github API access token. Injected by the Jenkins credential provider. - -set -e - -OS_ARCH="linux-amd64" -VM_DRIVER="kvm2" -JOB_NAME="KVM_Linux" -EXPECTED_DEFAULT_DRIVER="kvm2" - -# We pick kvm as our gvisor testbed because it is fast & reliable -EXTRA_TEST_ARGS="-gvisor" - -mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES" -sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP" - -source ./common.sh From 2249d36dc894b863cd8cd74aaeadac46f76dbfe1 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 24 Jun 2020 09:18:34 -0700 Subject: [PATCH 04/12] Skip flannel if Docker, weave hairpin --- test/integration/net_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/test/integration/net_test.go b/test/integration/net_test.go index 71c2062b78..bb5e43c446 100644 --- a/test/integration/net_test.go +++ b/test/integration/net_test.go @@ -57,6 +57,10 @@ func TestNetworkPlugins(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { + if DockerDriver() && strings.Contains(tc.name, "flannel") { + t.Skipf("flannel is not yet compatible with Docker driver: iptables 
v1.8.3 (legacy): Couldn't load target `CNI-x': No such file or directory") + } + start := time.Now() MaybeParallel(t) profile := UniqueProfileName(tc.name) @@ -162,6 +166,10 @@ func TestNetworkPlugins(t *testing.T) { if !t.Failed() { t.Run("HairPin", func(t *testing.T) { + if strings.Contains(tc.name, "weave") { + t.Skipf("skipping: weavenet hairpin results vary substantially across environments") + } + tryHairPin := func() error { _, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", "deployment/netcat", "--", "/bin/sh", "-c", "nc -w 5 -i 5 -z netcat 8080")) return err From 521d334b6abffcd9cdcfcfc5cd10d18d4b91f242 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 25 Jun 2020 14:23:18 -0700 Subject: [PATCH 05/12] Apply CNI on restarts, remove .rej file --- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 51 ++++++++++++------- .../bootstrapper/kubeadm/kubeadm.go.rej | 16 ------ 2 files changed, 34 insertions(+), 33 deletions(-) delete mode 100644 pkg/minikube/bootstrapper/kubeadm/kubeadm.go.rej diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index f9750fc3c8..7d5cea08bf 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -230,23 +230,8 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error { return errors.Wrap(err, "run") } - cnm, err := cni.New(cfg) - if err != nil { - return errors.Wrap(err, "cni config") - } - - if _, ok := cnm.(cni.Disabled); !ok { - out.T(out.CNI, "Configuring {{.name}} (Container Networking Interface) ...", out.V{"name": cnm.String()}) - - if err := cnm.Apply(k.c, []cni.Runner{k.c}); err != nil { - return errors.Wrap(err, "cni apply") - } - - if cfg.KubernetesConfig.ContainerRuntime == "crio" { - if err := sysinit.New(k.c).Restart("crio"); err != nil { - glog.Errorf("failed to restart CRI: %v", err) - } - } + if err := k.applyCNI(cfg); err != nil { + return errors.Wrap(err, "apply 
cni") } var wg sync.WaitGroup @@ -278,6 +263,33 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error { return nil } +// applyCNI applies CNI to a cluster. Needs to be done every time a VM is powered up. +func (k *Bootstrapper) applyCNI(cfg config.ClusterConfig) error { + + cnm, err := cni.New(cfg) + if err != nil { + return errors.Wrap(err, "cni config") + } + + if _, ok := cnm.(cni.Disabled); ok { + return nil + } + + out.T(out.CNI, "Configuring {{.name}} (Container Networking Interface) ...", out.V{"name": cnm.String()}) + + if err := cnm.Apply(k.c, []cni.Runner{k.c}); err != nil { + return errors.Wrap(err, "cni apply") + } + + if cfg.KubernetesConfig.ContainerRuntime == "crio" { + if err := sysinit.New(k.c).Restart("crio"); err != nil { + glog.Errorf("failed to restart CRI: %v", err) + } + } + + return nil +} + // unpause unpauses any Kubernetes backplane components func (k *Bootstrapper) unpause(cfg config.ClusterConfig) error { @@ -568,6 +580,11 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error { return errors.Wrap(err, "apiserver health") } + // because reboots clear /etc/cni + if err := k.applyCNI(cfg); err != nil { + return errors.Wrap(err, "apply cni") + } + if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { return errors.Wrap(err, "system pods") } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go.rej b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go.rej deleted file mode 100644 index 213b384a71..0000000000 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go.rej +++ /dev/null @@ -1,16 +0,0 @@ -*************** -*** 307,312 **** - return nil - } - - out.ErrT(out.Embarrassed, "Unable to restart cluster, will reset it: {{.error}}", out.V{"error": rerr}) - if err := k.DeleteCluster(cfg.KubernetesConfig); err != nil { - glog.Warningf("delete failed: %v", err) ---- 307,313 ---- - return nil - } - -+ panic(fmt.Errorf("restart failed: %v", err)) - 
out.ErrT(out.Embarrassed, "Unable to restart cluster, will reset it: {{.error}}", out.V{"error": rerr}) - if err := k.DeleteCluster(cfg.KubernetesConfig); err != nil { - glog.Warningf("delete failed: %v", err) From 2a39d993ea8349b87a373fadb7ef787bf83f0091 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 25 Jun 2020 14:36:39 -0700 Subject: [PATCH 06/12] Address lint comments, remove confusing multi-runner API --- cmd/minikube/cmd/start_flags.go | 4 ++-- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 2 +- pkg/minikube/cni/bridge.go | 12 ++++++++++-- pkg/minikube/cni/cni.go | 15 ++------------- pkg/minikube/cni/custom.go | 4 ++-- pkg/minikube/cni/disabled.go | 2 +- pkg/minikube/cni/flannel.go | 4 ++-- pkg/minikube/cni/kindnet.go | 4 ++-- pkg/minikube/node/start.go | 2 +- 9 files changed, 23 insertions(+), 26 deletions(-) diff --git a/cmd/minikube/cmd/start_flags.go b/cmd/minikube/cmd/start_flags.go index 4ee4e71ea7..35ff7631ee 100644 --- a/cmd/minikube/cmd/start_flags.go +++ b/cmd/minikube/cmd/start_flags.go @@ -133,8 +133,8 @@ func initMinikubeFlags() { startCmd.Flags().StringArrayVar(&config.AddonList, "addons", nil, "Enable addons. see `minikube addons list` for a list of valid addon names.") startCmd.Flags().String(criSocket, "", "The cri socket path to be used.") startCmd.Flags().String(networkPlugin, "", "Kubelet network plug-in to use (default: auto)") - startCmd.Flags().Bool(enableDefaultCNI, false, "DEPRECATED: Replaced by --cni=custom") - startCmd.Flags().String(cniFlag, "", "CNI plug-in to use. Valid options: auto, calico, custom, flannel, kindnet (default: auto)") + startCmd.Flags().Bool(enableDefaultCNI, false, "DEPRECATED: Replaced by --cni=bridge") + startCmd.Flags().String(cniFlag, "", "CNI plug-in to use. 
Valid options: auto, bridge, flannel, kindnet, or path to a CNI manifest (default: auto)") startCmd.Flags().StringSlice(waitComponents, kverify.DefaultWaitList, fmt.Sprintf("comma separated list of Kubernetes components to verify and wait for after starting a cluster. defaults to %q, available options: %q . other acceptable values are 'all' or 'none', 'true' and 'false'", strings.Join(kverify.DefaultWaitList, ","), strings.Join(kverify.AllComponentsList, ","))) startCmd.Flags().Duration(waitTimeout, 6*time.Minute, "max time to wait per Kubernetes core services to be healthy.") startCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.") diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 7d5cea08bf..138e7c8cce 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -277,7 +277,7 @@ func (k *Bootstrapper) applyCNI(cfg config.ClusterConfig) error { out.T(out.CNI, "Configuring {{.name}} (Container Networking Interface) ...", out.V{"name": cnm.String()}) - if err := cnm.Apply(k.c, []cni.Runner{k.c}); err != nil { + if err := cnm.Apply(k.c); err != nil { return errors.Wrap(err, "cni apply") } diff --git a/pkg/minikube/cni/bridge.go b/pkg/minikube/cni/bridge.go index 0340b92fc9..b24025d977 100644 --- a/pkg/minikube/cni/bridge.go +++ b/pkg/minikube/cni/bridge.go @@ -18,6 +18,7 @@ package cni import ( "bytes" + "fmt" "text/template" "github.com/pkg/errors" @@ -68,13 +69,20 @@ func (c Bridge) netconf() (assets.CopyableFile, error) { } // Apply enables the CNI -func (c Bridge) Apply(_ Runner, nodes []Runner) error { +func (c Bridge) Apply(r Runner) error { + if len(c.cc.Nodes) > 1 { + return fmt.Errorf("bridge CNI is incompatible with multi-node clusters") + } + f, err := 
c.netconf() if err != nil { return errors.Wrap(err, "netconf") } - return applyNetConf(nodes, f) + if err := r.Copy(f); err != nil { + return errors.Wrapf(err, "copy") + } + return nil } // CIDR returns the default CIDR used by this CNI diff --git a/pkg/minikube/cni/cni.go b/pkg/minikube/cni/cni.go index 947ac8d20f..eb54a262e9 100644 --- a/pkg/minikube/cni/cni.go +++ b/pkg/minikube/cni/cni.go @@ -46,8 +46,8 @@ type Runner interface { // Manager is a common interface for CNI type Manager interface { - // Enable enables the CNI - Apply(Runner, []Runner) error + // Apply a CNI. The provided runner is for the control plane + Apply(Runner) error // CIDR returns the default CIDR used by this CNI CIDR() string @@ -147,14 +147,3 @@ func applyManifest(cc config.ClusterConfig, r Runner, f assets.CopyableFile) err return nil } - -// applyNetConf applies a netconf file across nodes -func applyNetConf(rs []Runner, f assets.CopyableFile) error { - for _, r := range rs { - if err := r.Copy(f); err != nil { - return errors.Wrapf(err, "copy") - } - } - - return nil -} diff --git a/pkg/minikube/cni/custom.go b/pkg/minikube/cni/custom.go index a857a55483..6592d513ae 100644 --- a/pkg/minikube/cni/custom.go +++ b/pkg/minikube/cni/custom.go @@ -51,13 +51,13 @@ func NewCustom(cc config.ClusterConfig, manifest string) (Custom, error) { } // Apply enables the CNI -func (c Custom) Apply(master Runner, nodes []Runner) error { +func (c Custom) Apply(r Runner) error { m, err := assets.NewFileAsset(c.manifest, path.Dir(manifestPath()), path.Base(manifestPath()), "0644") if err != nil { return errors.Wrap(err, "manifest") } - return applyManifest(c.cc, master, m) + return applyManifest(c.cc, r, m) } // CIDR returns the default CIDR used by this CNI diff --git a/pkg/minikube/cni/disabled.go b/pkg/minikube/cni/disabled.go index 9eb49d729d..1c46ab40de 100644 --- a/pkg/minikube/cni/disabled.go +++ b/pkg/minikube/cni/disabled.go @@ -33,7 +33,7 @@ func (c Disabled) String() string { } // Apply enables 
the CNI -func (c Disabled) Apply(master Runner, nodes []Runner) error { +func (c Disabled) Apply(r Runner) error { if driver.IsKIC(c.cc.Driver) && c.cc.KubernetesConfig.ContainerRuntime != "docker" { glog.Warningf("CNI is recommended for %q driver and %q runtime - expect networking issues", c.cc.Driver, c.cc.KubernetesConfig.ContainerRuntime) } diff --git a/pkg/minikube/cni/flannel.go b/pkg/minikube/cni/flannel.go index f8a8cf30fa..4986b4d12e 100644 --- a/pkg/minikube/cni/flannel.go +++ b/pkg/minikube/cni/flannel.go @@ -636,8 +636,8 @@ func (c Flannel) String() string { } // Apply enables the CNI -func (c Flannel) Apply(master Runner, nodes []Runner) error { - return applyManifest(c.cc, master, manifestAsset([]byte(flannelTmpl))) +func (c Flannel) Apply(r Runner) error { + return applyManifest(c.cc, r, manifestAsset([]byte(flannelTmpl))) } // CIDR returns the default CIDR used by this CNI diff --git a/pkg/minikube/cni/kindnet.go b/pkg/minikube/cni/kindnet.go index fc6c178f87..6178c6715f 100644 --- a/pkg/minikube/cni/kindnet.go +++ b/pkg/minikube/cni/kindnet.go @@ -167,12 +167,12 @@ func (c KindNet) manifest() (assets.CopyableFile, error) { } // Apply enables the CNI -func (c KindNet) Apply(master Runner, nodes []Runner) error { +func (c KindNet) Apply(r Runner) error { m, err := c.manifest() if err != nil { return errors.Wrap(err, "manifest") } - return applyManifest(c.cc, master, m) + return applyManifest(c.cc, r, m) } // CIDR returns the default CIDR used by this CNI diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index e30e9ee343..6541568c06 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -191,7 +191,7 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { return nil, errors.Wrap(err, "cni") } - if err := cnm.Apply(cpr, []cni.Runner{cpr, starter.Runner}); err != nil { + if err := cnm.Apply(cpr); err != nil { return nil, errors.Wrap(err, "cni apply") } } From 
d16e7d492432f8ea2911453c7742d4b54b062450 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 25 Jun 2020 14:43:19 -0700 Subject: [PATCH 07/12] Use new kapi.KubectlBinary API --- pkg/minikube/cni/cni.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/pkg/minikube/cni/cni.go b/pkg/minikube/cni/cni.go index eb54a262e9..6d278ed894 100644 --- a/pkg/minikube/cni/cni.go +++ b/pkg/minikube/cni/cni.go @@ -26,6 +26,7 @@ import ( "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/minikube/pkg/kapi" "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" @@ -123,17 +124,12 @@ func manifestAsset(b []byte) assets.CopyableFile { return assets.NewMemoryAssetTarget(b, manifestPath(), "0644") } -// kubectlPath returns the path to the kubelet -func kubectlPath(cc config.ClusterConfig) string { - return path.Join(vmpath.GuestPersistentDir, "binaries", cc.KubernetesConfig.KubernetesVersion, "kubectl") -} - // applyManifest applies a CNI manifest func applyManifest(cc config.ClusterConfig, r Runner, f assets.CopyableFile) error { ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - kubectl := kubectlPath(cc) + kubectl := kapi.KubectlBinaryPath(cc.KubernetesConfig.KubernetesVersion) glog.Infof("applying CNI manifest using %s ...", kubectl) if err := r.Copy(f); err != nil { From 2f3094d32cd537d9f9317e64bac71dcbc525d964 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 25 Jun 2020 14:43:29 -0700 Subject: [PATCH 08/12] simplify string output --- pkg/minikube/cni/bridge.go | 2 +- pkg/minikube/cni/custom.go | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/pkg/minikube/cni/bridge.go b/pkg/minikube/cni/bridge.go index b24025d977..5e36e91be2 100644 --- a/pkg/minikube/cni/bridge.go +++ b/pkg/minikube/cni/bridge.go @@ -54,7 +54,7 @@ type Bridge struct { // String returns a string representation of this CNI func (c Bridge) String() 
string { - return "Bridge CNI" + return "bridge CNI" } func (c Bridge) netconf() (assets.CopyableFile, error) { diff --git a/pkg/minikube/cni/custom.go b/pkg/minikube/cni/custom.go index 6592d513ae..e68eb4f49e 100644 --- a/pkg/minikube/cni/custom.go +++ b/pkg/minikube/cni/custom.go @@ -17,7 +17,6 @@ limitations under the License. package cni import ( - "fmt" "os" "path" @@ -34,7 +33,7 @@ type Custom struct { // String returns a string representation of this CNI func (c Custom) String() string { - return fmt.Sprintf("Custom (%s)", c.manifest) + return c.manifest } // NewCustom returns a well-formed Custom CNI manager From f091b4d1f6c719330cb76e7cb9602896513aab21 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 25 Jun 2020 14:45:50 -0700 Subject: [PATCH 09/12] Decrease test pass count by 1, as per code review comment --- .github/workflows/pr.yml | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index 89e7d60c5f..60428c014b 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -18,7 +18,7 @@ jobs: - name: Download Dependencies run: go mod download - name: Build Binaries - run: | + run: | make cross make e2e-cross cp -r test/integration/testdata ./out @@ -539,7 +539,7 @@ jobs: echo "*** $numPass Passed ***" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi if [ "$numPass" -eq 0 ];then echo "*** 0 Passed! ***";exit 2;fi - if [ "$numPass" -lt 27 ];then echo "*** Failed to pass at least 27 ! ***";exit 2;fi + if [ "$numPass" -lt 26 ];then echo "*** Failed to pass at least 26 ! ***";exit 2;fi if [ "$numPass" -eq 0 ];then echo "*** Passed! 
***";exit 0;fi addons_certs_docker_ubuntu: runs-on: ubuntu-18.04 @@ -1028,17 +1028,17 @@ jobs: if: always() needs: [ - functional_docker_ubuntu, - functional_virtualbox_macos, - functional_docker_windows, - functional_hyperv_windows, - functional_baremetal_ubuntu18_04, - addons_certs_docker_ubuntu, - addons_certs_virtualbox_macos, - multinode_docker_ubuntu, - multinode_virtualbox_macos, - pause_preload_dockerflags_docker_ubuntu, - pause_preload_dockerflags_virtualbox_macos, + functional_docker_ubuntu, + functional_virtualbox_macos, + functional_docker_windows, + functional_hyperv_windows, + functional_baremetal_ubuntu18_04, + addons_certs_docker_ubuntu, + addons_certs_virtualbox_macos, + multinode_docker_ubuntu, + multinode_virtualbox_macos, + pause_preload_dockerflags_docker_ubuntu, + pause_preload_dockerflags_virtualbox_macos, ] runs-on: ubuntu-18.04 steps: From a43d18ef9a85a3b7d4b39d4fa39ba6d18fd5452d Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 25 Jun 2020 15:21:25 -0700 Subject: [PATCH 10/12] Update Bridge docstring --- pkg/minikube/cni/bridge.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/cni/bridge.go b/pkg/minikube/cni/bridge.go index 5e36e91be2..9bac1b8244 100644 --- a/pkg/minikube/cni/bridge.go +++ b/pkg/minikube/cni/bridge.go @@ -47,7 +47,7 @@ var bridgeConf = template.Must(template.New("bridge").Parse(` } `)) -// Bridge is a CNI manager than does nothing +// Bridge is a simple CNI manager for single-node usage type Bridge struct { cc config.ClusterConfig } From 94b8ddcb70440b7162c57aedd98f41d0cd3b262d Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 25 Jun 2020 15:27:51 -0700 Subject: [PATCH 11/12] Decrease test count to 26 --- .github/workflows/master.yml | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index e7900142a4..a31479093c 100644 --- a/.github/workflows/master.yml +++ 
b/.github/workflows/master.yml @@ -20,7 +20,7 @@ jobs: - name: Download Dependencies run: go mod download - name: Build Binaries - run: | + run: | make cross make e2e-cross cp -r test/integration/testdata ./out @@ -541,7 +541,7 @@ jobs: echo "*** $numPass Passed ***" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi if [ "$numPass" -eq 0 ];then echo "*** 0 Passed! ***";exit 2;fi - if [ "$numPass" -lt 27 ];then echo "*** Failed to pass at least 27 ! ***";exit 2;fi + if [ "$numPass" -lt 26 ];then echo "*** Failed to pass at least 26 ! ***";exit 2;fi if [ "$numPass" -eq 0 ];then echo "*** Passed! ***";exit 0;fi addons_certs_docker_ubuntu: runs-on: ubuntu-18.04 @@ -1030,17 +1030,17 @@ jobs: if: always() needs: [ - functional_docker_ubuntu, - functional_virtualbox_macos, - functional_docker_windows, - functional_hyperv_windows, - functional_baremetal_ubuntu18_04, - addons_certs_docker_ubuntu, - addons_certs_virtualbox_macos, - multinode_docker_ubuntu, - multinode_virtualbox_macos, - pause_preload_dockerflags_docker_ubuntu, - pause_preload_dockerflags_virtualbox_macos, + functional_docker_ubuntu, + functional_virtualbox_macos, + functional_docker_windows, + functional_hyperv_windows, + functional_baremetal_ubuntu18_04, + addons_certs_docker_ubuntu, + addons_certs_virtualbox_macos, + multinode_docker_ubuntu, + multinode_virtualbox_macos, + pause_preload_dockerflags_docker_ubuntu, + pause_preload_dockerflags_virtualbox_macos, ] runs-on: ubuntu-18.04 steps: From 03ec6f941996a870b114f165bd5253a7e43a038c Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 25 Jun 2020 16:09:49 -0700 Subject: [PATCH 12/12] Update docs --- site/content/en/docs/commands/start.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/site/content/en/docs/commands/start.md b/site/content/en/docs/commands/start.md index ce8780bb1c..3e38d75656 100644 --- a/site/content/en/docs/commands/start.md +++ b/site/content/en/docs/commands/start.md @@ -29,7 +29,7 @@ 
minikube start [flags] --auto-update-drivers If set, automatically updates drivers to the latest version. Defaults to true. (default true) --base-image string The base image to use for docker/podman drivers. Intended for local development. (default "gcr.io/k8s-minikube/kicbase:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438") --cache-images If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none. (default true) - --cni string CNI plug-in to use. Valid options: auto, calico, custom, flannel, kindnet (default: auto) + --cni string CNI plug-in to use. Valid options: auto, bridge, flannel, kindnet, or path to a CNI manifest (default: auto) --container-runtime string The container runtime to be used (docker, crio, containerd). (default "docker") --cpus int Number of CPUs allocated to Kubernetes. (default 2) --cri-socket string The cri socket path to be used. @@ -44,7 +44,7 @@ minikube start [flags] --driver string Used to specify the driver to run Kubernetes in. The list of available drivers depends on operating system. --dry-run dry-run mode. Validates configuration, but does not mutate system state --embed-certs if true, will embed the certs in kubeconfig. - --enable-default-cni DEPRECATED: Replaced by --cni=custom + --enable-default-cni DEPRECATED: Replaced by --cni=bridge --extra-config ExtraOption A set of key=value pairs that describe configuration that may be passed to different components. The key should be '.' separated, and the first part before the dot is the component to apply the configuration to. Valid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, proxy, scheduler