tests pass :)
parent
7de758d515
commit
c81e24ea9f
|
@ -20,12 +20,9 @@ package integration
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/minikube/cmd/minikube/cmd"
|
||||
)
|
||||
|
||||
func TestMultiNode(t *testing.T) {
|
||||
|
@ -44,11 +41,11 @@ func TestMultiNode(t *testing.T) {
|
|||
name string
|
||||
validator validatorFunc
|
||||
}{
|
||||
{"StartWithParam", validateStart},
|
||||
{"AddNode", validateAddNode},
|
||||
{"StopNode", validateStopNode},
|
||||
{"StartNode", validateStartNode},
|
||||
{"DeleteNode", validateDeleteNode},
|
||||
{"FreshStart2Nodes", validateMultiNodeStart},
|
||||
{"AddNode", validateAddNodeToMultiNode},
|
||||
{"StopNode", validateStopRunningNode},
|
||||
{"StartAfterStop", validateStartNodeAfterStop},
|
||||
{"DeleteNode", validateDeleteNodeFromMultiNode},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
tc := tc
|
||||
|
@ -59,7 +56,7 @@ func TestMultiNode(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func validateStart(ctx context.Context, t *testing.T, profile string) {
|
||||
func validateMultiNodeStart(ctx context.Context, t *testing.T, profile string) {
|
||||
// Start a 2 node cluster with the --nodes param
|
||||
startArgs := append([]string{"start", "-p", profile, "--wait=true", "--nodes=2"}, StartArgs()...)
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
|
||||
|
@ -83,7 +80,7 @@ func validateStart(ctx context.Context, t *testing.T, profile string) {
|
|||
|
||||
}
|
||||
|
||||
func validateAddNode(ctx context.Context, t *testing.T, profile string) {
|
||||
func validateAddNodeToMultiNode(ctx context.Context, t *testing.T, profile string) {
|
||||
// Add a node to the current cluster
|
||||
addArgs := []string{"node", "add", "-p", profile, "-v", "3", "--alsologtostderr"}
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), addArgs...))
|
||||
|
@ -98,86 +95,67 @@ func validateAddNode(ctx context.Context, t *testing.T, profile string) {
|
|||
}
|
||||
|
||||
if strings.Count(rr.Stdout.String(), "host: Running") != 3 {
|
||||
t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String())
|
||||
t.Errorf("status says all hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String())
|
||||
}
|
||||
|
||||
if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 {
|
||||
t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String())
|
||||
t.Errorf("status says all kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String())
|
||||
}
|
||||
}
|
||||
|
||||
func validateStopNode(ctx context.Context, t *testing.T, profile string) {
|
||||
// Grab a worker node name
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--output", "json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
sts := []cmd.Status{}
|
||||
err = json.Unmarshal(rr.Stdout.Bytes(), &sts)
|
||||
|
||||
if len(sts) != 3 {
|
||||
t.Fatalf("status has the incorrect number of nodes: args %q: %v", rr.Command(), rr.Stdout.String())
|
||||
}
|
||||
|
||||
func validateStopRunningNode(ctx context.Context, t *testing.T, profile string) {
|
||||
// Names are autogenerated using the node.Name() function
|
||||
name := "m02"
|
||||
name := "m03"
|
||||
|
||||
// Run minikube node stop on that node
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "stop", name))
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "stop", name))
|
||||
if err != nil {
|
||||
t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
// Run status again to see the stopped host
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--output", "json"))
|
||||
if err != nil {
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
|
||||
// Exit code 7 means one host is stopped, which we are expecting
|
||||
if err != nil && rr.ExitCode != 7 {
|
||||
t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
stopped := 0
|
||||
for _, st := range sts {
|
||||
if st.Host == "Stopped" {
|
||||
stopped += 1
|
||||
// Make sure minikube status shows 2 running nodes and 1 stopped one
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
|
||||
if err != nil && rr.ExitCode != 7 {
|
||||
t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 {
|
||||
t.Errorf("incorrect number of running kubelets: args %q: %v", rr.Command(), rr.Stdout.String())
|
||||
}
|
||||
|
||||
if strings.Count(rr.Stdout.String(), "host: Stopped") != 1 {
|
||||
t.Errorf("incorrect number of stopped hosts: args %q: %v", rr.Command(), rr.Stdout.String())
|
||||
}
|
||||
|
||||
if strings.Count(rr.Stdout.String(), "kubelet: Stopped") != 1 {
|
||||
t.Errorf("incorrect number of stopped kubelets: args %q: %v", rr.Command(), rr.Stdout.String())
|
||||
}
|
||||
}
|
||||
|
||||
if stopped == 0 {
|
||||
t.Errorf("no nodes were stopped: %v", rr.Stdout.String())
|
||||
} else if stopped > 1 {
|
||||
t.Errorf("too many nodes were stopped: %v", rr.Stdout.String())
|
||||
}
|
||||
}
|
||||
func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile string) {
|
||||
// TODO (#7496): remove skip once restarts work
|
||||
t.Skip("Restarting nodes is broken :(")
|
||||
|
||||
func validateStartNode(ctx context.Context, t *testing.T, profile string) {
|
||||
// Grab the stopped node
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--output", "json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
sts := []cmd.Status{}
|
||||
err = json.Unmarshal(rr.Stdout.Bytes(), &sts)
|
||||
|
||||
if len(sts) != 3 {
|
||||
t.Fatalf("status has the incorrect number of nodes: args %q: %v", rr.Command(), rr.Stdout.String())
|
||||
}
|
||||
|
||||
var name string
|
||||
for _, st := range sts {
|
||||
if st.Host == "Stopped" {
|
||||
name = st.Name
|
||||
}
|
||||
}
|
||||
|
||||
if name == "" {
|
||||
t.Fatalf("Could not find stopped node")
|
||||
}
|
||||
name := "m03"
|
||||
|
||||
// Start the node back up
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", name))
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", name))
|
||||
if err != nil {
|
||||
t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err)
|
||||
t.Errorf("node start returned an error. args %q: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
// Make sure minikube status shows 3 running hosts
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
if strings.Count(rr.Stdout.String(), "host: Running") != 3 {
|
||||
|
@ -189,27 +167,21 @@ func validateStartNode(ctx context.Context, t *testing.T, profile string) {
|
|||
}
|
||||
}
|
||||
|
||||
func validateDeleteNode(ctx context.Context, t *testing.T, profile string) {
|
||||
// Grab a worker node name
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--output", "json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile string) {
|
||||
name := "m03"
|
||||
|
||||
sts := []cmd.Status{}
|
||||
err = json.Unmarshal(rr.Stdout.Bytes(), &sts)
|
||||
|
||||
if len(sts) != 3 {
|
||||
t.Fatalf("status has the incorrect number of nodes: args %q: %v", rr.Command(), rr.Stdout.String())
|
||||
}
|
||||
|
||||
name := "m02"
|
||||
// Start the node back up
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "delete", name))
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "delete", name))
|
||||
if err != nil {
|
||||
t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
// Make sure status is back down to 2 hosts
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
if strings.Count(rr.Stdout.String(), "host: Running") != 2 {
|
||||
t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String())
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue