Merge pull request #8858 from sharifelgamal/add-kubectl

check kubectl for multinode delete test
Sharif Elgamal 2020-09-08 11:13:26 -07:00 committed by GitHub
commit a8d95357c2
2 changed files with 25 additions and 5 deletions

pkg/minikube/bootstrapper/kubeadm/kubeadm.go

@@ -657,7 +657,11 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC
 	out, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", joinCmd))
 	if err != nil {
-		return errors.Wrapf(err, "cmd failed: %s\n%+v\n", joinCmd, out.Output())
+		if strings.Contains(err.Error(), "status \"Ready\" already exists in the cluster") {
+			glog.Infof("Node %s already joined the cluster, skip failure.", n.Name)
+		} else {
+			return errors.Wrapf(err, "cmd failed: %s\n%+v\n", joinCmd, out.Output())
+		}
 	}
 	return nil
 }
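
The hunk above makes JoinCluster idempotent: a repeated `kubeadm join` that fails only because the node already has a Ready status is logged and swallowed instead of failing the bootstrap. A minimal standalone sketch of that pattern, assuming a hypothetical joinNode helper with an injected runner rather than the real bootstrapper API:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// joinNode runs joinCmd through run and, like the JoinCluster change above,
// treats kubeadm's "already exists" failure as success.
func joinNode(run func(cmd string) error, joinCmd, nodeName string) error {
	if err := run(joinCmd); err != nil {
		if strings.Contains(err.Error(), `status "Ready" already exists in the cluster`) {
			fmt.Printf("Node %s already joined the cluster, skipping failure.\n", nodeName)
			return nil
		}
		return fmt.Errorf("cmd failed: %s: %w", joinCmd, err)
	}
	return nil
}

func main() {
	// Simulate a second join attempt against a node that is already a member
	// (the error text here is illustrative, not a captured kubeadm message).
	alreadyJoined := func(string) error {
		return errors.New(`a Node with name "m02" and status "Ready" already exists in the cluster`)
	}
	if err := joinNode(alreadyJoined, "kubeadm join ...", "m02"); err != nil {
		fmt.Println("unexpected error:", err)
	}
}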

test/integration/multinode_test.go

@@ -65,7 +65,7 @@ func TestMultiNode(t *testing.T) {
 func validateMultiNodeStart(ctx context.Context, t *testing.T, profile string) {
 	// Start a 2 node cluster with the --nodes param
-	startArgs := append([]string{"start", "-p", profile, "--wait=true", "--memory=2200", "--nodes=2"}, StartArgs()...)
+	startArgs := append([]string{"start", "-p", profile, "--wait=true", "--memory=2200", "--nodes=2", "-v=8", "--alsologtostderr"}, StartArgs()...)
 	rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
 	if err != nil {
 		t.Fatalf("failed to start cluster. args %q : %v", rr.Command(), err)
@@ -222,12 +224,14 @@ func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile
 		}
 	}
 	// Restart a full cluster with minikube start
-	startArgs := append([]string{"start", "-p", profile, "--wait=true"}, StartArgs()...)
+	startArgs := append([]string{"start", "-p", profile, "--wait=true", "-v=8", "--alsologtostderr"}, StartArgs()...)
 	rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
 	if err != nil {
 		t.Fatalf("failed to start cluster. args %q : %v", rr.Command(), err)
 	}
+	time.Sleep(Seconds(45))
+
 	// Make sure minikube status shows 2 running nodes
 	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr"))
 	if err != nil {
@@ -242,8 +244,6 @@ func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile
 		t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Output())
 	}
-	time.Sleep(Seconds(30))
-
 	// Make sure kubectl reports that all nodes are ready
 	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes"))
 	if err != nil {
@@ -294,4 +294,20 @@ func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile
 		}
 	}
+	// Make sure kubectl knows the node is gone
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes"))
+	if err != nil {
+		t.Fatalf("failed to run kubectl get nodes. args %q : %v", rr.Command(), err)
+	}
+	if strings.Count(rr.Stdout.String(), "NotReady") > 0 {
+		t.Errorf("expected 2 nodes to be Ready, got %v", rr.Output())
+	}
+
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes", "-o", `go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'`))
+	if err != nil {
+		t.Fatalf("failed to run kubectl get nodes. args %q : %v", rr.Command(), err)
+	}
+	if strings.Count(rr.Stdout.String(), "True") != 2 {
+		t.Errorf("expected 2 nodes Ready status to be True, got %v", rr.Output())
+	}
 }
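
The go-template query above prints one Ready-condition status per node, so counting "True" in stdout yields the number of Ready nodes; the preceding plain `kubectl get nodes` call additionally guards against any node reporting NotReady. A minimal sketch of the counting logic, with illustrative output standing in for a real kubectl run:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Illustrative stdout from the go-template above on a healthy two-node
	// cluster after the third node was deleted: one status line per node.
	stdout := " True\n True\n"

	// The test asserts exactly two "True" statuses, i.e. two Ready nodes.
	if got := strings.Count(stdout, "True"); got != 2 {
		fmt.Printf("expected 2 nodes Ready status to be True, got %d\n", got)
	} else {
		fmt.Println("both remaining nodes report Ready=True")
	}
}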