Merge pull request #10526 from hetong07/multinode-test

test: Add new test for multinode: two pods on two nodes have different IPs
pull/10850/head
Medya Ghazizadeh 2021-03-16 09:35:33 -07:00 committed by GitHub
commit 31f9ab55b4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 89 additions and 0 deletions

View File

@ -45,6 +45,7 @@ func TestMultiNode(t *testing.T) {
validator validatorFunc
}{
{"FreshStart2Nodes", validateMultiNodeStart},
{"DeployApp2Nodes", validateDeployAppToMultiNode},
{"AddNode", validateAddNodeToMultiNode},
{"ProfileList", validateProfileListWithMultiNode},
{"StopNode", validateStopRunningNode},
@ -387,3 +388,59 @@ func validatNameConflict(ctx context.Context, t *testing.T, profile string) {
t.Logf("failed to clean temporary profile. args %q : %v", rr.Command(), err)
}
}
// validateDeployAppToMultiNode deploys a two-replica "hello" app to the
// multinode cluster, verifies the replicas were assigned two distinct Pod IPs
// (i.e. they landed on different nodes, per the fixture's anti-affinity rule),
// and checks that every Pod can resolve both a public DNS name and the
// in-cluster service DNS name. Pods are deleted afterwards as cleanup.
func validateDeployAppToMultiNode(ctx context.Context, t *testing.T, profile string) {
	// Create a deployment for app.
	_, err := Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "apply", "-f", "./testdata/multinodes/multinode-pod-dns-test.yaml"))
	if err != nil {
		t.Errorf("failed to create hello deployment to multinode cluster: %v", err)
	}
	_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "rollout", "status", "deployment/hello"))
	if err != nil {
		t.Errorf("failed to deploy hello to multinode cluster: %v", err)
	}

	// Resolve Pod IPs. The jsonpath output is single-quoted and space-separated.
	rr, err := Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "get", "pods", "-o", "jsonpath='{.items[*].status.podIP}'"))
	if err != nil {
		t.Errorf("failed to retrieve Pod IPs: %v", err)
	}
	podIPs := strings.Split(strings.Trim(rr.Stdout.String(), "'"), " ")
	if len(podIPs) != 2 {
		t.Errorf("expected 2 Pod IPs but got %d", len(podIPs))
	} else if podIPs[0] == podIPs[1] {
		// Guarded by the length check above so we never index out of range,
		// and report both IPs (the original printed podIPs[0] twice).
		t.Errorf("expected 2 different pod IPs but got %s and %s", podIPs[0], podIPs[1])
	}

	// Get Pod names.
	rr, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "get", "pods", "-o", "jsonpath='{.items[*].metadata.name}'"))
	if err != nil {
		t.Errorf("failed to get Pod names: %v", err)
	}
	podNames := strings.Split(strings.Trim(rr.Stdout.String(), "'"), " ")

	// Verify both Pods can resolve a public DNS name.
	for _, name := range podNames {
		_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "exec", name, "nslookup", "kubernetes.io"))
		if err != nil {
			t.Errorf("Pod %s could not resolve 'kubernetes.io': %v", name, err)
		}
	}

	// Verify both Pods can resolve the in-cluster kubernetes service.
	for _, name := range podNames {
		_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "exec", name, "nslookup", "kubernetes.default.svc.cluster.local"))
		if err != nil {
			t.Errorf("Pod %s could not resolve local service (kubernetes.default.svc.cluster.local): %v", name, err)
		}
	}

	// Clean up: delete all Pods created by the deployment.
	for _, name := range podNames {
		_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "delete", "pod", name))
		if err != nil {
			t.Errorf("fail to delete pod %s: %v", name, err)
		}
	}
}

View File

@ -0,0 +1,32 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 100%
  selector:
    matchLabels:
      app: hello
  template:
    metadata:
      labels:
        app: hello
    spec:
      affinity:
        # Require replicas to land on separate hosts: no two Pods carrying the
        # app=hello label may schedule onto the same kubernetes.io/hostname.
        # NOTE: the selector must match the Pod template label (app=hello);
        # the previous value "hello-from" matched nothing, making the
        # anti-affinity rule a no-op.
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions: [{ key: app, operator: In, values: [hello] }]
              topologyKey: "kubernetes.io/hostname"
      containers:
        - name: hello-from
          image: pbitty/hello-from:latest
          ports:
            - name: http
              containerPort: 80
      terminationGracePeriodSeconds: 1