Multinode-test: create a deployment with 2 pods and validate that DNS is available to them.

pull/10526/head
hetong07 2021-03-04 16:01:26 -08:00
parent 09ee84d530
commit 24df623049
2 changed files with 81 additions and 0 deletions

test/integration/multinode_test.go

@@ -45,6 +45,7 @@ func TestMultiNode(t *testing.T) {
		validator validatorFunc
	}{
		{"FreshStart2Nodes", validateMultiNodeStart},
		{"DeployApp2Nodes", validateDeployAppToMultiNode},
		{"AddNode", validateAddNodeToMultiNode},
		{"ProfileList", validateProfileListWithMultiNode},
		{"StopNode", validateStopRunningNode},
@@ -387,3 +388,51 @@ func validatNameConflict(ctx context.Context, t *testing.T, profile string) {
		t.Logf("failed to clean temporary profile. args %q : %v", rr.Command(), err)
	}
}
func validateDeployAppToMultiNode(ctx context.Context, t *testing.T, profile string) {
	// create a deployment for the app
	_, err := Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "apply", "-f", "./testdata/multinodes/multinode-test-deployment.yaml"))
	if err != nil {
		t.Errorf("failed to create hello deployment to multinode cluster: %v", err)
	}
	_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "rollout", "status", "deployment/hello"))
	if err != nil {
		t.Errorf("failed to deploy hello to multinode cluster: %v", err)
	}

	// resolve Pod IPs
	rr, err := Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "get", "pods", "-o", "jsonpath='{.items[*].status.podIP}'"))
	if err != nil {
		t.Errorf("failed to retrieve Pod IPs: %v", err)
	}
	podIPs := strings.Split(strings.Trim(rr.Stdout.String(), "'"), " ")
	if len(podIPs) != 2 {
		t.Errorf("expected 2 Pod IPs but got %d", len(podIPs))
	} else if podIPs[0] == podIPs[1] {
		t.Errorf("expected 2 different Pod IPs but got %s and %s", podIPs[0], podIPs[1])
	}

	// get Pod names
	rr, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "get", "pods", "-o", "jsonpath='{.items[*].metadata.name}'"))
	if err != nil {
		t.Errorf("failed to get Pod names: %v", err)
	}
	podNames := strings.Split(strings.Trim(rr.Stdout.String(), "'"), " ")

	// verify both Pods can resolve a public DNS name
	for _, name := range podNames {
		_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "exec", name, "--", "nslookup", "kubernetes.io"))
		if err != nil {
			t.Errorf("Pod %s could not resolve 'kubernetes.io': %v", name, err)
		}
	}

	// clean up: delete all Pods
	for _, name := range podNames {
		_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "delete", "pod", name))
		if err != nil {
			t.Errorf("failed to delete Pod %s: %v", name, err)
		}
	}
}
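One subtlety in the jsonpath calls above: exec.CommandContext runs kubectl directly, with no shell to strip quoting, so the single quotes in jsonpath='{...}' reach kubectl as literal template text and come back wrapped around the output; that is what the strings.Trim(rr.Stdout.String(), "'") calls remove. A minimal standalone sketch of that parsing step, using a made-up sample string:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// sample raw output: kubectl echoes the literal quotes from the
	// jsonpath template around the space-separated Pod IPs
	out := "'10.244.0.3 10.244.1.2'"
	ips := strings.Split(strings.Trim(out, "'"), " ")
	fmt.Println(ips) // [10.244.0.3 10.244.1.2]
}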

test/integration/testdata/multinodes/multinode-test-deployment.yaml

@@ -0,0 +1,32 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 100%
  selector:
    matchLabels:
      app: hello
  template:
    metadata:
      labels:
        app: hello
    spec:
      affinity:
        # ⬇⬇⬇ This ensures pods will land on separate hosts
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions: [{ key: app, operator: In, values: [hello] }]
              topologyKey: "kubernetes.io/hostname"
      containers:
        - name: hello-from
          image: pbitty/hello-from:latest
          ports:
            - name: http
              containerPort: 80
      terminationGracePeriodSeconds: 1
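
A few notes on the manifest: maxUnavailable: 100% lets the rolling update replace both replicas at once, terminationGracePeriodSeconds: 1 keeps Pod deletion fast during test cleanup, and the podAntiAffinity rule (keyed on kubernetes.io/hostname) is what spreads the two replicas across nodes. If the test ever needs to assert that spread directly, a hypothetical check in the same style as validateDeployAppToMultiNode, reusing its Run/Target helpers, might look like:

	// hypothetical addition (not in this commit): assert the two Pods
	// landed on different nodes, as the podAntiAffinity rule intends
	rr, err := Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "get", "pods", "-o", "jsonpath='{.items[*].spec.nodeName}'"))
	if err != nil {
		t.Errorf("failed to retrieve Pod node names: %v", err)
	}
	nodeNames := strings.Split(strings.Trim(rr.Stdout.String(), "'"), " ")
	if len(nodeNames) == 2 && nodeNames[0] == nodeNames[1] {
		t.Errorf("expected Pods on 2 different nodes but both are on %s", nodeNames[0])
	}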