Merge branch 'master' of github.com:kubernetes/minikube into error-codes

pull/11478/head
Sharif Elgamal 2021-05-19 15:26:11 -07:00
commit 5b512a7296
190 changed files with 1644 additions and 703 deletions

@@ -736,11 +736,13 @@ TAG = $(STORAGE_PROVISIONER_TAG)
.PHONY: push-storage-provisioner-manifest
push-storage-provisioner-manifest: $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~storage\-provisioner\-image\-&~g") ## Push multi-arch storage-provisioner image
ifndef CIBUILD
docker login gcr.io/k8s-minikube
endif
set -x; for arch in $(ALL_ARCH); do docker push ${IMAGE}-$${arch}:${TAG}; done
docker manifest create --amend $(IMAGE):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(IMAGE)\-&:$(TAG)~g")
set -x; for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${IMAGE}:${TAG} ${IMAGE}-$${arch}:${TAG}; done
docker manifest push $(STORAGE_PROVISIONER_MANIFEST)
$(X_BUILD_ENV) docker manifest create --amend $(IMAGE):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(IMAGE)\-&:$(TAG)~g")
set -x; for arch in $(ALL_ARCH); do $(X_BUILD_ENV) docker manifest annotate --arch $${arch} ${IMAGE}:${TAG} ${IMAGE}-$${arch}:${TAG}; done
$(X_BUILD_ENV) docker manifest push $(STORAGE_PROVISIONER_MANIFEST)
.PHONY: push-docker
push-docker: # Push docker image based on the IMAGE variable (used internally by other targets)
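For context, the $(shell echo $(ALL_ARCH) | sed ...) idiom above rewrites each word of the space-separated architecture list into a per-arch target or image name via sed's & back-reference. A minimal shell sketch, assuming ALL_ARCH is "amd64 arm64" (the real value is defined elsewhere in the Makefile):

ALL_ARCH="amd64 arm64"
echo "$ALL_ARCH" | sed -e "s~[^ ]*~storage-provisioner-image-&~g"
# -> storage-provisioner-image-amd64 storage-provisioner-image-arm64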

@@ -52,7 +52,7 @@ var addCacheCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
out.WarningT("\"minikube cache\" will be deprecated in upcoming versions, please switch to \"minikube image load\"")
// Cache and load images into docker daemon
if err := machine.CacheAndLoadImages(args, cacheAddProfiles()); err != nil {
if err := machine.CacheAndLoadImages(args, cacheAddProfiles(), false); err != nil {
exit.Error(reason.InternalCacheLoad, "Failed to cache and load images", err)
}
// Add images to config file

@@ -25,8 +25,11 @@ import (
"strings"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/node"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/reason"
)
@@ -35,14 +38,16 @@ import (
var (
srcPath string
dstPath string
dstNode string
)
// cpCmd represents the cp command, similar to docker cp
var cpCmd = &cobra.Command{
Use: "cp <source file path> <target file absolute path>",
Use: "cp <source file path> <target node name>:<target file absolute path>",
Short: "Copy the specified file into minikube",
Long: "Copy the specified file into minikube, it will be saved at path <target file absolute path> in your minikube.\n" +
"Example Command : \"minikube cp a.txt /home/docker/b.txt\"\n",
"Example Command : \"minikube cp a.txt /home/docker/b.txt\"\n" +
" \"minikube cp a.txt minikube-m02:/home/docker/b.txt\"\n",
Run: func(cmd *cobra.Command, args []string) {
if len(args) != 2 {
exit.Message(reason.Usage, `Please specify the path to copy:
@@ -51,16 +56,47 @@ var cpCmd = &cobra.Command{
srcPath = args[0]
dstPath = args[1]
// if the destination path is not an absolute path, try to parse it in <node>:<abs path> format
if !strings.HasPrefix(dstPath, "/") {
if sp := strings.SplitN(dstPath, ":", 2); len(sp) == 2 {
dstNode = sp[0]
dstPath = sp[1]
}
}
validateArgs(srcPath, dstPath)
co := mustload.Running(ClusterFlagValue())
fa, err := assets.NewFileAsset(srcPath, pt.Dir(dstPath), pt.Base(dstPath), "0644")
if err != nil {
out.ErrLn("%v", errors.Wrap(err, "getting file asset"))
os.Exit(1)
}
if err = co.CP.Runner.Copy(fa); err != nil {
co := mustload.Running(ClusterFlagValue())
var runner command.Runner
if dstNode == "" {
runner = co.CP.Runner
} else {
n, _, err := node.Retrieve(*co.Config, dstNode)
if err != nil {
exit.Message(reason.GuestNodeRetrieve, "Node {{.nodeName}} does not exist.", out.V{"nodeName": dstNode})
}
h, err := machine.GetHost(co.API, *co.Config, *n)
if err != nil {
out.ErrLn("%v", errors.Wrap(err, "getting host"))
os.Exit(1)
}
runner, err = machine.CommandRunner(h)
if err != nil {
out.ErrLn("%v", errors.Wrap(err, "getting command runner"))
os.Exit(1)
}
}
if err = runner.Copy(fa); err != nil {
out.ErrLn("%v", errors.Wrap(err, "copying file"))
os.Exit(1)
}
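Usage sketch of the resulting behaviour; minikube-m02 is a hypothetical second node of a multi-node cluster, and the single-path form keeps working as before:

# copy into the default (control-plane) node, unchanged
minikube cp a.txt /home/docker/b.txt
# copy into a named node by prefixing the absolute destination path
minikube cp a.txt minikube-m02:/home/docker/b.txt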

@@ -45,6 +45,7 @@ var (
pull bool
imgDaemon bool
imgRemote bool
overwrite bool
tag string
push bool
dockerFile string
@@ -130,13 +131,13 @@ var loadImageCmd = &cobra.Command{
if imgDaemon || imgRemote {
image.UseDaemon(imgDaemon)
image.UseRemote(imgRemote)
if err := machine.CacheAndLoadImages(args, []*config.Profile{profile}); err != nil {
if err := machine.CacheAndLoadImages(args, []*config.Profile{profile}, overwrite); err != nil {
exit.Error(reason.GuestImageLoad, "Failed to load image", err)
}
} else if local {
// Load images from local files, without doing any caching or checks in container runtime
// This is similar to tarball.Image but it is done by the container runtime in the cluster.
if err := machine.DoLoadImages(args, []*config.Profile{profile}, ""); err != nil {
if err := machine.DoLoadImages(args, []*config.Profile{profile}, "", overwrite); err != nil {
exit.Error(reason.GuestImageLoad, "Failed to load image", err)
}
}
@@ -248,6 +249,7 @@ func init() {
loadImageCmd.Flags().BoolVarP(&pull, "pull", "", false, "Pull the remote image (no caching)")
loadImageCmd.Flags().BoolVar(&imgDaemon, "daemon", false, "Cache image from docker daemon")
loadImageCmd.Flags().BoolVar(&imgRemote, "remote", false, "Cache image from remote registry")
loadImageCmd.Flags().BoolVar(&overwrite, "overwrite", true, "Overwrite image even if same image:tag name exists")
imageCmd.AddCommand(loadImageCmd)
imageCmd.AddCommand(removeImageCmd)
buildImageCmd.Flags().StringVarP(&tag, "tag", "t", "", "Tag to apply to the new image (optional)")
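A usage sketch for the new flag; since it is registered with a default of true, it mostly matters when opting out (the image name below is a placeholder):

# default: replace a cached image even if the same image:tag already exists
minikube image load myrepo/app:latest
# keep the existing image:tag instead
minikube image load --overwrite=false myrepo/app:latest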

@@ -28,6 +28,7 @@ import (
"os/user"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
@@ -215,7 +216,7 @@ func runStart(cmd *cobra.Command, args []string) {
// Walk down the rest of the options
for _, alt := range alts {
// Skip non-default drivers
if !ds.Default {
if !alt.Default {
continue
}
out.WarningT("Startup with {{.old_driver}} driver failed, trying with alternate driver {{.new_driver}}: {{.error}}", out.V{"old_driver": ds.Name, "new_driver": alt.Name, "error": err})
@@ -589,7 +590,16 @@ func selectDriver(existing *config.ClusterConfig) (registry.DriverState, []regis
pick, alts, rejects := driver.Suggest(choices)
if pick.Name == "" {
out.Step(style.ThumbsDown, "Unable to pick a default driver. Here is what was considered, in preference order:")
sort.Slice(rejects, func(i, j int) bool {
if rejects[i].Priority == rejects[j].Priority {
return rejects[i].Preference > rejects[j].Preference
}
return rejects[i].Priority > rejects[j].Priority
})
for _, r := range rejects {
if !r.Default {
continue
}
out.Infof("{{ .name }}: {{ .rejection }}", out.V{"name": r.Name, "rejection": r.Rejection})
if r.Suggestion != "" {
out.Infof("{{ .name }}: Suggestion: {{ .suggestion}}", out.V{"name": r.Name, "suggestion": r.Suggestion})

@@ -160,7 +160,7 @@ func initMinikubeFlags() {
startCmd.Flags().IntP(nodes, "n", 1, "The number of nodes to spin up. Defaults to 1.")
startCmd.Flags().Bool(preload, true, "If set, download tarball of preloaded images if available to improve start time. Defaults to true.")
startCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
startCmd.Flags().Bool(forceSystemd, false, "If set, force the container runtime to use sytemd as cgroup manager. Defaults to false.")
startCmd.Flags().Bool(forceSystemd, false, "If set, force the container runtime to use systemd as cgroup manager. Defaults to false.")
startCmd.Flags().StringP(network, "", "", "network to run minikube with. Now it is used by docker/podman and KVM drivers. If left empty, minikube will create a new network.")
startCmd.Flags().StringVarP(&outputFormat, "output", "o", "text", "Format to print stdout in. Options include: [text,json]")
startCmd.Flags().StringP(trace, "", "", "Send trace events. Options include: [gcp]")

@@ -39,8 +39,9 @@ import (
// unpauseCmd represents the docker-pause command
var unpauseCmd = &cobra.Command{
Use: "unpause",
Short: "unpause Kubernetes",
Use: "unpause",
Aliases: []string{"resume"},
Short: "unpause Kubernetes",
Run: func(cmd *cobra.Command, args []string) {
cname := ClusterFlagValue()
register.SetEventLogPath(localpath.EventLog(cname))
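With the new alias, both spellings invoke the same command:

minikube unpause
minikube resume    # equivalent, via the "resume" alias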

@@ -28,10 +28,10 @@ backend k8s-api-https
#tcp-request inspect-delay 10s
#tcp-request content lua.foo_action
tcp-request inspect-delay 10s
tcp-request content lua.unpause 192.168.49.2 8080
tcp-request content lua.unpause {{.NetworkInfo.ControlPlaneNodeIP}} 8080
tcp-request content reject if { var(req.blocked) -m bool }
option tcplog
option tcp-check
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
server k8s-api-1 192.168.49.2:8443 check
server k8s-api-1 {{.NetworkInfo.ControlPlaneNodeIP}}:{{.NetworkInfo.ControlPlaneNodePort}} check
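Rendered against a hypothetical control-plane node at 192.168.58.2 with the API server on port 8443, the two templated lines above would come out as:

tcp-request content lua.unpause 192.168.58.2 8080
server k8s-api-1 192.168.58.2:8443 check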

@@ -25,7 +25,22 @@ fi
VERSION_TO_INSTALL=${1}
INSTALL_PATH=${2}
ARCH=${ARCH:=amd64}
function current_arch() {
case $(arch) in
"x86_64")
echo "amd64"
;;
"aarch64")
echo "arm64"
;;
*)
echo "unexpected arch: $(arch). use amd64" 1>&2
echo "amd64"
;;
esac
}
ARCH=${ARCH:=$(current_arch)}
# installs or updates golang if the right version doesn't exist
function check_and_install_golang() {
@@ -62,7 +77,7 @@ function install_golang() {
# using sudo because previously installed versions might have been installed by a different user.
# as it was the case on jenkins VM.
sudo curl -qL -O "https://storage.googleapis.com/golang/go${1}.${INSTALLOS}-${ARCH}.tar.gz" &&
sudo tar -xzf go${1}.${INSTALLOS}-amd64.tar.gz &&
sudo tar -xzf go${1}.${INSTALLOS}-${ARCH}.tar.gz &&
sudo rm -rf "${2}/go" &&
sudo mv go "${2}/" && sudo chown -R $(whoami): ${2}/go
popd >/dev/null
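On a hypothetical aarch64 host, the new current_arch fallback behaves as follows (ARCH can still be forced from the environment, which skips the mapping):

arch                           # prints: aarch64
ARCH=${ARCH:=$(current_arch)}
echo "$ARCH"                   # prints: arm64
# unrecognised machines fall back to amd64, with a warning on stderr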

@@ -0,0 +1,66 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -x -o pipefail
# Make sure docker is installed and configured
./hack/jenkins/installers/check_install_docker.sh || true
yes|gcloud auth configure-docker
# Make sure gh is installed and configured
./hack/jenkins/installers/check_install_gh.sh
if [[ $SP_VERSION != v* ]]; then
SP_VERSION=v$SP_VERSION
fi
SED="sed -i"
if [ "$(uname)" = "Darwin" ]; then
SED="sed -i ''"
fi
# Write the new version back into the Makefile
${SED} "s/STORAGE_PROVISIONER_TAG ?= .*/STORAGE_PROVISIONER_TAG ?= ${SP_VERSION}/" Makefile
# Build the new image
CIBUILD=yes make push-storage-provisioner-manifest
ec=$?
if [ $ec -gt 0 ]; then
exit $ec
fi
# Bump the preload version
PLV=$(egrep "PreloadVersion =" pkg/minikube/download/preload.go | cut -d \" -f 2)
RAW=${PLV:1}
RAW=$((RAW+1))
PLV=v${RAW}
${SED} "s/PreloadVersion = .*/PreloadVersion = \"${PLV}\"/" pkg/minikube/download/preload.go
# Open a PR with the changes
git config user.name "minikube-bot"
git config user.email "minikube-bot@google.com"
branch=storage-provisioner-${SP_VERSION}
git checkout -b ${branch}
git add Makefile pkg/minikube/download/preload.go
git commit -m "Update storage provisioner to ${SP_VERSION}"
git remote add minikube-bot git@github.com:minikube-bot/minikube.git
git push -f minikube-bot ${branch}
gh pr create --fill --base master --head minikube-bot:${branch}
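The preload bump above is plain parameter expansion plus arithmetic; assuming a hypothetical current value of PreloadVersion = "v11", it works out as:

PLV="v11"
RAW=${PLV:1}      # strip the leading "v" -> 11
RAW=$((RAW+1))    # -> 12
PLV=v${RAW}       # -> v12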

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
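For reference, in the KubeProxyConfiguration API, conntrack.maxPerCore: 0 tells kube-proxy to leave the kernel's conntrack limit as-is instead of computing and writing one; presumably (the commit message isn't shown here) this avoids the sysctl write failing when the "node" is itself a container. The value kube-proxy would otherwise manage can be inspected with:

sysctl net.netfilter.nf_conntrack_max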

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -72,4 +72,6 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
mode: "iptables"

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -67,3 +67,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -69,4 +69,6 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
mode: "iptables"

@@ -4,7 +4,7 @@ publish = "site/public/"
command = "pwd && cd themes/docsy && git submodule update -f --init && cd ../.. && hugo"
[build.environment]
HUGO_VERSION = "0.68.3"
HUGO_VERSION = "0.83.1"
[context.production.environment]
HUGO_ENV = "production"

@@ -219,6 +219,7 @@ https://github.com/kubernetes/minikube/issues/7332`, out.V{"driver_name": cc.Dri
var networkInfo assets.NetworkInfo
if len(cc.Nodes) >= 1 {
networkInfo.ControlPlaneNodeIP = cc.Nodes[0].IP
networkInfo.ControlPlaneNodePort = cc.Nodes[0].Port
} else {
out.WarningT("At least one control plane node is needed to enable the addon")
}

@@ -69,14 +69,54 @@ func digDNS(ociBin, containerName, dns string) (net.IP, error) {
return ip, nil
}
// gatewayIP inspects oci container to find a gateway IP string
func gatewayIP(ociBin, containerName string) (string, error) {
rr, err := runCmd(exec.Command(ociBin, "container", "inspect", "--format", "{{.NetworkSettings.Gateway}}", containerName))
if err != nil {
return "", errors.Wrapf(err, "inspect gateway")
}
if gatewayIP := strings.TrimSpace(rr.Stdout.String()); gatewayIP != "" {
return gatewayIP, nil
}
// https://github.com/kubernetes/minikube/issues/11293
// need to check nested network
// check .NetworkSettings.Networks["cluster-name"].Gateway and then
// .NetworkSettings.Networks["bridge"|"podman"].Gateway
for _, network := range []string{containerName, defaultBridgeName(ociBin)} {
gatewayIP, err := networkGateway(ociBin, containerName, network)
// err == nil here doesn't mean we got a valid gateway IP; it can still be an empty string
if err != nil {
return "", err
}
if gatewayIP != "" {
return gatewayIP, nil
}
}
klog.Infof("Couldn't find gateway for container %s", containerName)
return "", nil
}
func networkGateway(ociBin, container, network string) (string, error) {
format := fmt.Sprintf(`
{{ if index .NetworkSettings.Networks %q}}
{{(index .NetworkSettings.Networks %q).Gateway}}
{{ end }}
`, network, network)
rr, err := runCmd(exec.Command(ociBin, "container", "inspect", "--format", format, container))
if err != nil {
return "", errors.Wrapf(err, "inspect gateway")
}
return strings.TrimSpace(rr.Stdout.String()), nil
}
// containerGatewayIP gets the default gateway ip for the container
func containerGatewayIP(ociBin string, containerName string) (net.IP, error) {
rr, err := runCmd(exec.Command(ociBin, "container", "inspect", "--format", "{{.NetworkSettings.Gateway}}", containerName))
gatewayIP, err := gatewayIP(ociBin, containerName)
if err != nil {
return nil, errors.Wrapf(err, "inspect gateway")
}
ip := net.ParseIP(strings.TrimSpace(rr.Stdout.String()))
return ip, nil
return net.ParseIP(gatewayIP), nil
}
// ForwardedPort will return port mapping for a container using cli.
@@ -142,7 +182,7 @@ func podmanContainerIP(ociBin string, name string) (string, string, error) {
return "", "", errors.Wrapf(err, "podman inspect ip %s", name)
}
output := strings.TrimSpace(rr.Stdout.String())
if err == nil && output == "" { // podman returns empty for 127.0.0.1
if output == "" { // podman returns empty for 127.0.0.1
// check network, if the ip address is missing
ipv4, ipv6, err := dockerContainerIP(ociBin, name)
if err == nil {
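The nested-network lookup in networkGateway can be reproduced from a shell; a sketch against a hypothetical cluster container named minikube:

docker container inspect --format \
  '{{ if index .NetworkSettings.Networks "minikube"}}{{(index .NetworkSettings.Networks "minikube").Gateway}}{{ end }}' \
  minikube
# gatewayIP() retries with the default bridge network ("bridge" for docker,
# "podman" for podman) when this prints an empty string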

@@ -41,15 +41,21 @@ const dockerDefaultBridge = "bridge"
// name of the default bridge network
const podmanDefaultBridge = "podman"
func defaultBridgeName(ociBin string) string {
switch ociBin {
case Docker:
return dockerDefaultBridge
case Podman:
return podmanDefaultBridge
default:
klog.Warningf("Unexpected oci: %v", ociBin)
return dockerDefaultBridge
}
}
// CreateNetwork creates a network returns gateway and error, minikube creates one network per cluster
func CreateNetwork(ociBin string, networkName string) (net.IP, error) {
var defaultBridgeName string
if ociBin == Docker {
defaultBridgeName = dockerDefaultBridge
}
if ociBin == Podman {
defaultBridgeName = podmanDefaultBridge
}
defaultBridgeName := defaultBridgeName(ociBin)
if networkName == defaultBridgeName {
klog.Infof("skipping creating network since default network %s was specified", networkName)
return nil, nil

@@ -103,11 +103,6 @@ func TestDocs(docPath string, pathToCheck string) error {
return err
}
_, err = buf.WriteString(fmt.Sprintf("TEST COUNT: %d", counter))
if err != nil {
return err
}
err = ioutil.WriteFile(docPath, buf.Bytes(), 0o644)
return err
}

@@ -42,7 +42,8 @@ type Addon struct {
// NetworkInfo contains control plane node IP address used for add on template
type NetworkInfo struct {
ControlPlaneNodeIP string
ControlPlaneNodeIP string
ControlPlaneNodePort int
}
// NewAddon creates a new Addon
@@ -88,13 +89,13 @@ var Addons = map[string]*Addon{
"auto-pause-hook.yaml",
"0640"),
MustBinAsset(
"deploy/addons/auto-pause/haproxy.cfg",
"/var/lib/minikube/",
"deploy/addons/auto-pause/haproxy.cfg.tmpl",
vmpath.GuestPersistentDir,
"haproxy.cfg",
"0640"),
MustBinAsset(
"deploy/addons/auto-pause/unpause.lua",
"/var/lib/minikube/",
vmpath.GuestPersistentDir,
"unpause.lua",
"0640"),
MustBinAsset(
@@ -486,8 +487,8 @@ var Addons = map[string]*Addon{
"metallb-config.yaml",
"0640"),
}, false, "metallb", map[string]string{
"Speaker": "metallb/speaker:v0.8.2@sha256:f1941498a28cdb332429e25d18233683da6949ecfc4f6dacf12b1416d7d38263",
"Controller": "metallb/controller:v0.8.2@sha256:5c050e59074e152711737d2bb9ede96dff67016c80cf25cdf5fc46109718a583",
"Speaker": "metallb/speaker:v0.9.6@sha256:c66585a805bed1a3b829d8fb4a4aab9d87233497244ebff96f1b88f1e7f8f991",
"Controller": "metallb/controller:v0.9.6@sha256:fbfdb9d3f55976b0ee38f3309d83a4ca703efcf15d6ca7889cd8189142286502",
}, nil),
"ambassador": NewAddon([]*BinAsset{
MustBinAsset(
@@ -660,7 +661,7 @@
}
// GenerateTemplateData generates template data for template assets
func GenerateTemplateData(addon *Addon, cfg config.KubernetesConfig, networkInfo NetworkInfo) interface{} {
func GenerateTemplateData(addon *Addon, cfg config.KubernetesConfig, netInfo NetworkInfo) interface{} {
a := runtime.GOARCH
// Some legacy docker images still need the -arch suffix
@@ -697,7 +698,7 @@ func GenerateTemplateData(addon *Addon, cfg config.KubernetesConfig, networkInfo
}
// Network info for generating template
opts.NetworkInfo["ControlPlaneNodeIP"] = networkInfo.ControlPlaneNodeIP
opts.NetworkInfo["ControlPlaneNodeIP"] = netInfo.ControlPlaneNodeIP
opts.NetworkInfo["ControlPlaneNodePort"] = fmt.Sprint(netInfo.ControlPlaneNodePort)
if opts.Images == nil {
opts.Images = make(map[string]string) // Avoid nil access when rendering

@@ -91,6 +91,8 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "{{.PodSubnet }}"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
{{- range $i, $val := printMapInOrder .KubeProxyOptions ": " }}
{{$val}}
{{- end}}

@@ -94,6 +94,8 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "{{.PodSubnet }}"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
{{- range $i, $val := printMapInOrder .KubeProxyOptions ": " }}
{{$val}}
{{- end}}

@@ -123,23 +123,14 @@ func recentReleases(n int) ([]string, error) {
}
/**
Need a separate test function to test the DNS server IP,
as the v1.11 yaml file is very different from v1.12+.
This test case has only one thing to test: the
networking/dnsDomain value.
*/
func TestGenerateKubeadmYAMLDNS(t *testing.T) {
// test all testdata releases greater than v1.11
versions, err := recentReleases(0)
if err != nil {
t.Errorf("versions: %v", err)
}
for i, v := range versions {
if semver.Compare(v, "v1.11") <= 0 {
versions = versions[0:i]
break
}
}
fcr := command.NewFakeCommandRunner()
fcr.SetCommandToOutput(map[string]string{
"docker info --format {{.CgroupDriver}}": "systemd\n",

@@ -1,22 +0,0 @@
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
noTaintMaster: true
api:
advertiseAddress: 1.1.1.1
bindPort: 12345
controlPlaneEndpoint: control-plane.minikube.internal
kubernetesVersion: v1.11.0
certificatesDir: /var/lib/minikube/certs
networking:
serviceSubnet: 10.96.0.0/12
etcd:
dataDir: /var/lib/minikube/etcd
controllerManagerExtraArgs:
leader-elect: "false"
schedulerExtraArgs:
leader-elect: "false"
nodeName: "mk"
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
criSocket: /run/containerd/containerd.sock
apiServerExtraArgs:
enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"

@@ -1,22 +0,0 @@
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
noTaintMaster: true
api:
advertiseAddress: 1.1.1.1
bindPort: 8443
controlPlaneEndpoint: control-plane.minikube.internal
kubernetesVersion: v1.11.0
certificatesDir: /var/lib/minikube/certs
networking:
serviceSubnet: 10.96.0.0/12
etcd:
dataDir: /var/lib/minikube/etcd
controllerManagerExtraArgs:
leader-elect: "false"
schedulerExtraArgs:
leader-elect: "false"
nodeName: "mk"
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
criSocket: /run/containerd/containerd.sock
apiServerExtraArgs:
enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"

@@ -1,22 +0,0 @@
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
noTaintMaster: true
api:
advertiseAddress: 1.1.1.1
bindPort: 8443
controlPlaneEndpoint: control-plane.minikube.internal
kubernetesVersion: v1.11.0
certificatesDir: /var/lib/minikube/certs
networking:
serviceSubnet: 10.96.0.0/12
etcd:
dataDir: /var/lib/minikube/etcd
controllerManagerExtraArgs:
leader-elect: "false"
schedulerExtraArgs:
leader-elect: "false"
nodeName: "mk"
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
criSocket: /run/containerd/containerd.sock
apiServerExtraArgs:
enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"

@@ -1,30 +0,0 @@
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
noTaintMaster: true
api:
advertiseAddress: 1.1.1.1
bindPort: 8443
controlPlaneEndpoint: control-plane.minikube.internal
kubernetesVersion: v1.11.0
certificatesDir: /var/lib/minikube/certs
networking:
serviceSubnet: 10.96.0.0/12
etcd:
dataDir: /var/lib/minikube/etcd
controllerManagerExtraArgs:
leader-elect: "false"
schedulerExtraArgs:
leader-elect: "false"
nodeName: "mk"
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
criSocket: /var/run/crio/crio.sock
apiServerExtraArgs:
enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
fail-no-swap: "true"
feature-gates: "a=b"
controllerManagerExtraArgs:
feature-gates: "a=b"
kube-api-burst: "32"
schedulerExtraArgs:
feature-gates: "a=b"
scheduler-name: "mini-scheduler"

@@ -1,22 +0,0 @@
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
noTaintMaster: true
api:
advertiseAddress: 1.1.1.1
bindPort: 8443
controlPlaneEndpoint: control-plane.minikube.internal
kubernetesVersion: v1.11.0
certificatesDir: /var/lib/minikube/certs
networking:
serviceSubnet: 10.96.0.0/12
etcd:
dataDir: /var/lib/minikube/etcd
controllerManagerExtraArgs:
leader-elect: "false"
schedulerExtraArgs:
leader-elect: "false"
nodeName: "mk"
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
criSocket: /var/run/crio/crio.sock
apiServerExtraArgs:
enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"

@@ -1,21 +0,0 @@
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
noTaintMaster: true
api:
advertiseAddress: 1.1.1.1
bindPort: 8443
controlPlaneEndpoint: control-plane.minikube.internal
kubernetesVersion: v1.11.0
certificatesDir: /var/lib/minikube/certs
networking:
serviceSubnet: 10.96.0.0/12
etcd:
dataDir: /var/lib/minikube/etcd
controllerManagerExtraArgs:
leader-elect: "false"
schedulerExtraArgs:
leader-elect: "false"
nodeName: "mk"
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
apiServerExtraArgs:
enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"

@@ -1,22 +0,0 @@
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
noTaintMaster: true
api:
advertiseAddress: 1.1.1.1
bindPort: 8443
controlPlaneEndpoint: control-plane.minikube.internal
kubernetesVersion: v1.11.0
certificatesDir: /var/lib/minikube/certs
networking:
serviceSubnet: 10.96.0.0/12
etcd:
dataDir: /var/lib/minikube/etcd
controllerManagerExtraArgs:
leader-elect: "false"
schedulerExtraArgs:
leader-elect: "false"
nodeName: "mk"
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
imageRepository: test/repo
apiServerExtraArgs:
enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"

@@ -1,26 +0,0 @@
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
noTaintMaster: true
api:
advertiseAddress: 1.1.1.1
bindPort: 8443
controlPlaneEndpoint: control-plane.minikube.internal
kubernetesVersion: v1.11.0
certificatesDir: /var/lib/minikube/certs
networking:
serviceSubnet: 10.96.0.0/12
etcd:
dataDir: /var/lib/minikube/etcd
controllerManagerExtraArgs:
leader-elect: "false"
schedulerExtraArgs:
leader-elect: "false"
nodeName: "mk"
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
apiServerExtraArgs:
enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
fail-no-swap: "true"
controllerManagerExtraArgs:
kube-api-burst: "32"
schedulerExtraArgs:
scheduler-name: "mini-scheduler"

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -72,4 +72,6 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
mode: "iptables"

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -67,3 +67,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -69,4 +69,6 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
mode: "iptables"

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -72,4 +72,6 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
mode: "iptables"

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -67,3 +67,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -69,4 +69,6 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
mode: "iptables"

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -72,4 +72,6 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
mode: "iptables"

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -67,3 +67,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -69,4 +69,6 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
mode: "iptables"

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -72,4 +72,6 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
mode: "iptables"

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -67,3 +67,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -69,4 +69,6 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
mode: "iptables"

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -72,4 +72,6 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
mode: "iptables"

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -67,3 +67,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -69,4 +69,6 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
mode: "iptables"

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -72,4 +72,6 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
mode: "iptables"

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -67,3 +67,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -69,4 +69,6 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
mode: "iptables"

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -72,4 +72,6 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
mode: "iptables"

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -67,3 +67,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

@@ -69,4 +69,6 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
mode: "iptables"

@@ -66,3 +66,5 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0

Some files were not shown because too many files have changed in this diff.