multinode: fix kindnet and dns issues

pull/10985/head
Predrag Rogic 2021-04-05 02:36:40 +01:00
parent 6786da83f3
commit 387ad774bb
8 changed files with 101 additions and 61 deletions

View File

@@ -392,6 +392,9 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
if _, ok := cnm.(cni.Disabled); !ok {
klog.Infof("Found %q CNI - setting NetworkPlugin=cni", cnm)
cc.KubernetesConfig.NetworkPlugin = "cni"
if err := setCNIConfDir(&cc, cnm); err != nil {
klog.Errorf("unable to set CNI Config Directory: %v", err)
}
}
}
@@ -415,6 +418,24 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
return createNode(cc, kubeNodeName, existing)
}
// setCNIConfDir sets kubelet's '--cni-conf-dir' flag to the custom CNI Config Directory path (the same path used by the CNI Deployment) to avoid conflicting CNI configs.
// ref: https://github.com/kubernetes/minikube/issues/10984
// Note: currently this change affects only the Kindnet CNI (and any multinode cluster using it), but it can easily be extended to other/all CNIs if needed.
func setCNIConfDir(cc *config.ClusterConfig, cnm cni.Manager) error {
if _, kindnet := cnm.(cni.KindNet); kindnet {
// auto-set custom CNI Config Directory, if not user-specified
eo := fmt.Sprintf("kubelet.cni-conf-dir=%s", cni.CustomCNIConfDir)
if !cc.KubernetesConfig.ExtraOptions.Exists(eo) {
klog.Infof("auto-setting extra-config to %q", eo)
if err := cc.KubernetesConfig.ExtraOptions.Set(eo); err != nil {
return fmt.Errorf("failed auto-setting extra-config %q: %v", eo, err)
}
klog.Infof("extra-config set to %q", eo)
}
}
return nil
}
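For context, a minimal standalone sketch of the precedence this change implements, using hypothetical names (the real logic lives in setCNIConfDir above and in the cni package's New): an explicitly set kubelet.cni-conf-dir extra option always wins, and only when the user has not supplied one does a Kindnet cluster fall back to the dedicated /etc/cni/net.mk path.

```go
package main

import "fmt"

// effectiveConfDir is a hypothetical helper mirroring the precedence in this
// commit: a user-supplied kubelet.cni-conf-dir extra option always wins; only
// when none is given does a Kindnet cluster fall back to /etc/cni/net.mk,
// keeping its config out of the shared /etc/cni/net.d directory.
func effectiveConfDir(userConfDir string, usingKindnet bool) string {
	if userConfDir != "" {
		return userConfDir // respect the user-specified directory
	}
	if usingKindnet {
		return "/etc/cni/net.mk" // cni.CustomCNIConfDir default in this change
	}
	return "/etc/cni/net.d" // stock CNI config directory
}

func main() {
	fmt.Println(effectiveConfDir("", true))                // /etc/cni/net.mk
	fmt.Println(effectiveConfDir("/etc/cni/custom", true)) // /etc/cni/custom
	fmt.Println(effectiveConfDir("", false))               // /etc/cni/net.d
}
```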
func checkNumaCount(k8sVersion string) {
if viper.GetInt(kvmNUMACount) < 1 || viper.GetInt(kvmNUMACount) > 8 {
exit.Message(reason.Usage, "--kvm-numa-count range is 1-8")

View File

@@ -65,11 +65,11 @@ const domainTmpl = `
<target dev='hda' bus='virtio'/>
</disk>
<interface type='network'>
<source network='{{.Network}}'/>
<source network='{{.PrivateNetwork}}'/>
<model type='virtio'/>
</interface>
<interface type='network'>
<source network='{{.PrivateNetwork}}'/>
<source network='{{.Network}}'/>
<model type='virtio'/>
</interface>
<serial type='pty'>

View File

@@ -39,6 +39,12 @@ const (
DefaultPodCIDR = "10.244.0.0/16"
)
var (
// CustomCNIConfDir is the custom CNI Config Directory path used to avoid conflicting CNI configs
// ref: https://github.com/kubernetes/minikube/issues/10984
CustomCNIConfDir = "/etc/cni/net.mk"
)
// Runner is the subset of command.Runner this package consumes
type Runner interface {
RunCmd(cmd *exec.Cmd) (*command.RunResult, error)
@@ -62,6 +68,7 @@ type tmplInput struct {
ImageName string
PodCIDR string
DefaultRoute string
CNIConfDir string
}
// New returns a new CNI manager
@@ -73,6 +80,12 @@ func New(cc config.ClusterConfig) (Manager, error) {
klog.Infof("Creating CNI manager for %q", cc.KubernetesConfig.CNI)
// respect user-specified custom CNI Config Directory, if any
userCNIConfDir := cc.KubernetesConfig.ExtraOptions.Get("cni-conf-dir", "kubelet")
if userCNIConfDir != "" {
CustomCNIConfDir = userCNIConfDir
}
switch cc.KubernetesConfig.CNI {
case "", "auto":
return chooseDefault(cc), nil

View File

@@ -130,7 +130,8 @@ spec:
volumes:
- name: cni-cfg
hostPath:
path: /etc/cni/net.d
path: {{.CNIConfDir}}
type: DirectoryOrCreate
- name: xtables-lock
hostPath:
path: /run/xtables.lock
@@ -158,6 +159,7 @@ func (c KindNet) manifest() (assets.CopyableFile, error) {
DefaultRoute: "0.0.0.0/0", // assumes IPv4
PodCIDR: DefaultPodCIDR,
ImageName: images.KindNet(c.cc.KubernetesConfig.ImageRepository),
CNIConfDir: CustomCNIConfDir,
}
b := bytes.Buffer{}
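Since the kindnet manifest is rendered with Go's text/template, here is a trimmed sketch (hypothetical template name, reduced to just the volume stanza) of how the new CNIConfDir field lands in the DaemonSet's hostPath volume:

```go
package main

import (
	"os"
	"text/template"
)

// a stand-in for the kindnet manifest above, reduced to the cni-cfg volume
const volumeTmpl = `volumes:
  - name: cni-cfg
    hostPath:
      path: {{.CNIConfDir}}
      type: DirectoryOrCreate
`

func main() {
	t := template.Must(template.New("kindnet-volume").Parse(volumeTmpl))
	// with the default from this change, the path renders as /etc/cni/net.mk
	if err := t.Execute(os.Stdout, struct{ CNIConfDir string }{"/etc/cni/net.mk"}); err != nil {
		panic(err)
	}
}
```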

View File

@@ -22,7 +22,7 @@ spec:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions: [{ key: app, operator: In, values: [hello-from] }]
matchExpressions: [{ key: app, operator: In, values: [hello] }]
topologyKey: "kubernetes.io/hostname"
containers:
- name: hello-from

View File

@@ -22,23 +22,24 @@ date: 2019-11-24
minikube start --nodes 2 -p multinode-demo
```
```
😄 [multinode-demo] minikube v1.16.0 on Darwin 10.15.7
✨ Automatically selected the docker driver. Other choices: hyperkit, virtualbox
😄 [multinode-demo] minikube v1.18.1 on Opensuse-Tumbleweed
✨ Automatically selected the docker driver
👍 Starting control plane node multinode-demo in cluster multinode-demo
🔥 Creating docker container (CPUs=2, Memory=2200MB) ...
🐳 Preparing Kubernetes v1.20.0 on Docker 20.10.0 ...
🔗 Configuring CNI (Container Networking Interface) ...
🔥 Creating docker container (CPUs=2, Memory=8000MB) ...
🐳 Preparing Kubernetes v1.20.2 on Docker 20.10.3 ...
▪ Generating certificates and keys ...
▪ Booting up control plane ...
▪ Configuring RBAC rules ...
🔗 Configuring CNI (Container Networking Interface) ...
🔎 Verifying Kubernetes components...
▪ Using image gcr.io/k8s-minikube/storage-provisioner:v5
🌟 Enabled addons: storage-provisioner, default-storageclass
👍 Starting node multinode-demo-m02 in cluster multinode-demo
🔥 Creating docker container (CPUs=2, Memory=2200MB) ...
🔥 Creating docker container (CPUs=2, Memory=8000MB) ...
🌐 Found network options:
▪ NO_PROXY=192.168.49.2
🐳 Preparing Kubernetes v1.20.0 on Docker 20.10.0 ...
🐳 Preparing Kubernetes v1.20.2 on Docker 20.10.3 ...
▪ env NO_PROXY=192.168.49.2
🔎 Verifying Kubernetes components...
🏄 Done! kubectl is now configured to use "multinode-demo" cluster and "default" namespace by default
@@ -50,9 +51,9 @@ minikube start --nodes 2 -p multinode-demo
kubectl get nodes
```
```
NAME STATUS ROLES AGE VERSION
multinode-demo Ready master 72s v1.18.2
multinode-demo-m02 Ready <none> 33s v1.18.2
NAME STATUS ROLES AGE VERSION
multinode-demo Ready control-plane,master 99s v1.20.2
multinode-demo-m02 Ready <none> 73s v1.20.2
```
- You can also check the status of your nodes:
@@ -68,7 +69,6 @@ host: Running
kubelet: Running
apiserver: Running
kubeconfig: Configured
timeToStop: Nonexistent
multinode-demo-m02
type: Worker
@@ -106,9 +106,9 @@ service/hello created
kubectl get pods -o wide
```
```
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
hello-c7b8df44f-qbhxh 1/1 Running 0 31s 10.244.0.3 multinode-demo <none> <none>
hello-c7b8df44f-xv4v6 1/1 Running 0 31s 10.244.0.2 multinode-demo <none> <none>
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
hello-695c67cf9c-bzrzk 1/1 Running 0 22s 10.244.1.2 multinode-demo-m02 <none> <none>
hello-695c67cf9c-frcvw 1/1 Running 0 22s 10.244.0.3 multinode-demo <none> <none>
```
- Look at our service to find out which URL to hit
@@ -117,31 +117,31 @@ hello-c7b8df44f-xv4v6 1/1 Running 0 31s 10.244.0.2 multinod
minikube service list -p multinode-demo
```
```
|-------------|------------|--------------|-----------------------------|
| NAMESPACE | NAME | TARGET PORT | URL |
|-------------|------------|--------------|-----------------------------|
| default | hello | 80 | http://192.168.64.226:31000 |
| default | kubernetes | No node port | |
| kube-system | kube-dns | No node port | |
|-------------|------------|--------------|-----------------------------|
|-------------|------------|--------------|---------------------------|
| NAMESPACE | NAME | TARGET PORT | URL |
|-------------|------------|--------------|---------------------------|
| default | hello | 80 | http://192.168.49.2:31000 |
| default | kubernetes | No node port | |
| kube-system | kube-dns | No node port | |
|-------------|------------|--------------|---------------------------|
```
- Let's hit the URL a few times and see what comes back
```shell
curl http://192.168.64.226:31000
curl http://192.168.49.2:31000
```
```
Hello from hello-c7b8df44f-qbhxh (10.244.0.3)
Hello from hello-695c67cf9c-frcvw (10.244.0.3)
curl http://192.168.64.226:31000
Hello from hello-c7b8df44f-qbhxh (10.244.0.3)
curl http://192.168.49.2:31000
Hello from hello-695c67cf9c-bzrzk (10.244.1.2)
curl http://192.168.64.226:31000
Hello from hello-c7b8df44f-xv4v6 (10.244.0.2)
curl http://192.168.49.2:31000
Hello from hello-695c67cf9c-bzrzk (10.244.1.2)
curl http://192.168.64.226:31000
Hello from hello-c7b8df44f-xv4v6 (10.244.0.2)
curl http://192.168.49.2:31000
Hello from hello-695c67cf9c-frcvw (10.244.0.3)
```
- Multiple nodes!

View File

@@ -393,12 +393,12 @@ func validateDeployAppToMultiNode(ctx context.Context, t *testing.T, profile str
// Create a deployment for app
_, err := Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "apply", "-f", "./testdata/multinodes/multinode-pod-dns-test.yaml"))
if err != nil {
t.Errorf("failed to create hello deployment to multinode cluster")
t.Errorf("failed to create busybox deployment to multinode cluster")
}
_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "rollout", "status", "deployment/hello"))
_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "rollout", "status", "deployment/busybox"))
if err != nil {
t.Errorf("failed to delploy hello to multinode cluster")
t.Errorf("failed to delploy busybox to multinode cluster")
}
// resolve Pod IPs
@@ -423,24 +423,25 @@ func validateDeployAppToMultiNode(ctx context.Context, t *testing.T, profile str
// verify both Pods could resolve a public DNS
for _, name := range podNames {
_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "exec", name, "nslookup", "kubernetes.io"))
_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "exec", name, "--", "nslookup", "kubernetes.io"))
if err != nil {
t.Errorf("Pod %s could not resolve 'kubernetes.io': %v", name, err)
}
}
// verify both Pods could resolve "kubernetes.default"
// this one is also checked by k8s e2e node conformance tests:
// https://github.com/kubernetes/kubernetes/blob/f137c4777095b3972e2dd71a01365d47be459389/test/e2e_node/environment/conformance.go#L125-L179
for _, name := range podNames {
_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "exec", name, "--", "nslookup", "kubernetes.default"))
if err != nil {
t.Errorf("Pod %s could not resolve 'kubernetes.default': %v", name, err)
}
}
// verify both Pods could resolve a local service.
for _, name := range podNames {
_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "exec", name, "nslookup", "kubernetes.default.svc.cluster.local"))
_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "exec", name, "--", "nslookup", "kubernetes.default.svc.cluster.local"))
if err != nil {
t.Errorf("Pod %s could not resolve local service (kubernetes.default.svc.cluster.local): %v", name, err)
}
}
// clean up, delete all pods
for _, name := range podNames {
_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "delete", "pod", name))
if err != nil {
t.Errorf("fail to delete pod %s: %v", name, err)
}
}
}
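The same DNS checks can be reproduced by hand against a running multinode cluster; a rough sketch using plain os/exec (it assumes kubectl is on the PATH, the kubeconfig points at the cluster, and the hypothetical pod-name placeholder is replaced with a real one):

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// hypothetical placeholder; use a name from `kubectl get pods -l app=busybox`
	pod := "busybox-xxxxx"
	for _, host := range []string{
		"kubernetes.io",                        // public DNS
		"kubernetes.default",                   // cluster-internal short name
		"kubernetes.default.svc.cluster.local", // fully qualified service name
	} {
		out, err := exec.Command("kubectl", "exec", pod, "--", "nslookup", host).CombinedOutput()
		if err != nil {
			fmt.Printf("lookup of %s from %s failed: %v\n%s\n", host, pod, err, out)
			continue
		}
		fmt.Printf("lookup of %s from %s succeeded\n", host, pod)
	}
}
```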

View File

@@ -1,32 +1,35 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: hello
name: busybox
labels:
app: busybox
spec:
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 100%
selector:
matchLabels:
app: hello
app: busybox
template:
metadata:
labels:
app: hello
app: busybox
spec:
containers:
- name: busybox
# flaky nslookup in busybox versions newer than 1.28:
# https://github.com/docker-library/busybox/issues/48
# note: gcr.io/kubernetes-e2e-test-images/dnsutils:1.3
# has similar issues (ie, resolves but returns exit 1)
image: busybox:1.28
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
restartPolicy: Always
affinity:
# ⬇⬇⬇ This ensures pods will land on separate hosts
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions: [{ key: app, operator: In, values: [hello-from] }]
matchExpressions: [{ key: app, operator: In, values: [busybox] }]
topologyKey: "kubernetes.io/hostname"
containers:
- name: hello-from
image: pbitty/hello-from:latest
ports:
- name: http
containerPort: 80
terminationGracePeriodSeconds: 1
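To confirm that the anti-affinity rule above actually spreads the two replicas across nodes, one can compare each pod's node assignment; a tiny sketch (same assumptions as the previous one about kubectl and the kubeconfig):

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// prints one "<pod> <node>" pair per line for the busybox replicas
	out, err := exec.Command("kubectl", "get", "pods", "-l", "app=busybox",
		"-o", `jsonpath={range .items[*]}{.metadata.name} {.spec.nodeName}{"\n"}{end}`).CombinedOutput()
	if err != nil {
		fmt.Printf("kubectl failed: %v\n%s", err, out)
		return
	}
	fmt.Print(string(out))
}
```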