Merge pull request #8858 from sharifelgamal/add-kubectl

check kubectl for multinode delete test

commit a8d95357c2
@@ -657,7 +657,11 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC
 	out, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", joinCmd))
 	if err != nil {
-		return errors.Wrapf(err, "cmd failed: %s\n%+v\n", joinCmd, out.Output())
+		if strings.Contains(err.Error(), "status \"Ready\" already exists in the cluster") {
+			glog.Infof("Node %s already joined the cluster, skip failure.", n.Name)
+		} else {
+			return errors.Wrapf(err, "cmd failed: %s\n%+v\n", joinCmd, out.Output())
+		}
 	}
 	return nil
 }
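The bootstrapper change makes a repeated join attempt tolerant: when kubeadm reports that the node already exists with status "Ready", the error is logged and swallowed instead of failing the whole operation. A minimal standalone sketch of the same pattern (the alreadyJoined helper name and the sample error text are illustrative, not part of the PR):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// alreadyJoined reports whether a kubeadm join failure only means the
// node is already registered with the cluster (hypothetical helper,
// mirroring the strings.Contains check in the diff above).
func alreadyJoined(err error) bool {
	return err != nil && strings.Contains(err.Error(),
		`status "Ready" already exists in the cluster`)
}

func main() {
	// Simulated kubeadm error output for a node that has already joined.
	err := errors.New(`a Node with name "m02" and status "Ready" already exists in the cluster`)
	if alreadyJoined(err) {
		fmt.Println("node already joined the cluster, skipping failure")
	}
}

Matching on the error string rather than a typed error is pragmatic here, since the message originates in kubeadm's CLI output rather than a Go API.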
@@ -65,7 +65,7 @@ func TestMultiNode(t *testing.T) {

 func validateMultiNodeStart(ctx context.Context, t *testing.T, profile string) {
 	// Start a 2 node cluster with the --nodes param
-	startArgs := append([]string{"start", "-p", profile, "--wait=true", "--memory=2200", "--nodes=2"}, StartArgs()...)
+	startArgs := append([]string{"start", "-p", profile, "--wait=true", "--memory=2200", "--nodes=2", "-v=8", "--alsologtostderr"}, StartArgs()...)
 	rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
 	if err != nil {
 		t.Fatalf("failed to start cluster. args %q : %v", rr.Command(), err)
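Adding -v=8 and --alsologtostderr to the start arguments raises minikube's log verbosity and mirrors the logs to stderr, so a failed multinode start leaves a detailed bootstrap trace in the test output.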
@@ -222,12 +222,14 @@ func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile
 		}
 	}
 	// Restart a full cluster with minikube start
-	startArgs := append([]string{"start", "-p", profile, "--wait=true"}, StartArgs()...)
+	startArgs := append([]string{"start", "-p", profile, "--wait=true", "-v=8", "--alsologtostderr"}, StartArgs()...)
 	rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
 	if err != nil {
 		t.Fatalf("failed to start cluster. args %q : %v", rr.Command(), err)
 	}

+	time.Sleep(Seconds(45))
+
 	// Make sure minikube status shows 2 running nodes
 	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr"))
 	if err != nil {
@@ -242,8 +244,6 @@ func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile
 		t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Output())
 	}

-	time.Sleep(Seconds(30))
-
 	// Make sure kubectl reports that all nodes are ready
 	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes"))
 	if err != nil {
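Together with the previous hunk, this moves the fixed wait earlier in the flow: the test now sleeps 45 seconds immediately after the restart, before the minikube status assertion, and drops the 30-second pause that used to sit between the status check and the kubectl check, presumably so both kubelets have registered before the first assertion runs.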
@@ -294,4 +294,20 @@ func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile
 		}
 	}

+	// Make sure kubectl knows the node is gone
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes"))
+	if err != nil {
+		t.Fatalf("failed to run kubectl get nodes. args %q : %v", rr.Command(), err)
+	}
+	if strings.Count(rr.Stdout.String(), "NotReady") > 0 {
+		t.Errorf("expected 2 nodes to be Ready, got %v", rr.Output())
+	}
+
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes", "-o", `go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'`))
+	if err != nil {
+		t.Fatalf("failed to run kubectl get nodes. args %q : %v", rr.Command(), err)
+	}
+	if strings.Count(rr.Stdout.String(), "True") != 2 {
+		t.Errorf("expected 2 nodes Ready status to be True, got %v", rr.Output())
+	}
 }
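The second kubectl check goes beyond grepping the human-readable table: the go-template prints the status of each node's Ready condition, so counting "True" occurrences verifies that exactly two nodes remain Ready after the delete. A rough sketch of that counting logic against sample output (the sample string is illustrative):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Illustrative output of the go-template query on a healthy
	// two-node cluster: one "True" per node whose Ready condition
	// is satisfied.
	out := " True\n True\n"

	if got := strings.Count(out, "True"); got != 2 {
		fmt.Printf("expected 2 Ready nodes, got %d\n", got)
		return
	}
	fmt.Println("both remaining nodes report Ready=True")
}

One subtlety: because the test passes arguments through exec rather than a shell, the single quotes wrapped around the go-template value are likely handed to kubectl literally and echoed in its output; the assertion is unaffected since it counts substrings rather than parsing exact lines.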