add more detailed multinode tests

pull/7810/head
Sharif Elgamal 2020-04-20 15:01:21 -07:00
parent b45112da30
commit 7de758d515
5 changed files with 202 additions and 36 deletions

View File

@@ -75,7 +75,6 @@ var nodeStartCmd = &cobra.Command{
}
func init() {
nodeStartCmd.Flags().String("name", "", "The name of the node to start")
nodeStartCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
nodeCmd.AddCommand(nodeStartCmd)
}

View File

@@ -53,6 +53,5 @@ var nodeStopCmd = &cobra.Command{
}
func init() {
nodeStopCmd.Flags().String("name", "", "The name of the node to delete")
nodeCmd.AddCommand(nodeStopCmd)
}

View File

@@ -104,12 +104,11 @@ var statusCmd = &cobra.Command{
cname := ClusterFlagValue()
api, cc := mustload.Partial(cname)
-var st *Status
-var err error
+var statuses []*Status
for _, n := range cc.Nodes {
glog.Infof("checking status of %s ...", n.Name)
machineName := driver.MachineName(*cc, n)
-st, err = status(api, *cc, n)
+st, err := status(api, *cc, n)
glog.Infof("%s status: %+v", machineName, st)
if err != nil {
@@ -118,36 +117,40 @@ var statusCmd = &cobra.Command{
if st.Host == Nonexistent {
glog.Errorf("The %q host does not exist!", machineName)
}
+statuses = append(statuses, st)
}
-switch strings.ToLower(output) {
-case "text":
+switch strings.ToLower(output) {
+case "text":
+for _, st := range statuses {
if err := statusText(st, os.Stdout); err != nil {
exit.WithError("status text failure", err)
}
case "json":
if err := statusJSON(st, os.Stdout); err != nil {
exit.WithError("status json failure", err)
}
default:
exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output))
}
case "json":
if err := statusJSON(statuses, os.Stdout); err != nil {
exit.WithError("status json failure", err)
}
default:
exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output))
}
-// TODO: Update for multi-node
-os.Exit(exitCode(st))
+os.Exit(exitCode(statuses))
},
}
-func exitCode(st *Status) int {
+func exitCode(statuses []*Status) int {
c := 0
-if st.Host != state.Running.String() {
-c |= minikubeNotRunningStatusFlag
-}
-if (st.APIServer != state.Running.String() && st.APIServer != Irrelevant) || st.Kubelet != state.Running.String() {
-c |= clusterNotRunningStatusFlag
-}
-if st.Kubeconfig != Configured && st.Kubeconfig != Irrelevant {
-c |= k8sNotRunningStatusFlag
+for _, st := range statuses {
+if st.Host != state.Running.String() {
+c |= minikubeNotRunningStatusFlag
+}
+if (st.APIServer != state.Running.String() && st.APIServer != Irrelevant) || st.Kubelet != state.Running.String() {
+c |= clusterNotRunningStatusFlag
+}
+if st.Kubeconfig != Configured && st.Kubeconfig != Irrelevant {
+c |= k8sNotRunningStatusFlag
+}
}
return c
}
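For reference, a minimal standalone sketch of what the reworked exitCode now does: each node's problem flags are ORed into one exit code, so a single unhealthy worker is enough to make the status command exit non-zero. The Status fields and flag values below are illustrative stand-ins, not minikube's actual types or constants.

package main

import "fmt"

// Stand-in status type; the real cmd.Status has more fields.
type Status struct {
    Host    string
    Kubelet string
}

// Illustrative values; the real flags are the *StatusFlag constants referenced above.
const (
    hostNotRunningFlag    = 1 << 0
    kubeletNotRunningFlag = 1 << 1
)

// exitCode ORs each node's problem flags together, so one stopped
// worker is enough to make the combined exit code non-zero.
func exitCode(statuses []*Status) int {
    c := 0
    for _, st := range statuses {
        if st.Host != "Running" {
            c |= hostNotRunningFlag
        }
        if st.Kubelet != "Running" {
            c |= kubeletNotRunningFlag
        }
    }
    return c
}

func main() {
    nodes := []*Status{
        {Host: "Running", Kubelet: "Running"}, // healthy control plane
        {Host: "Stopped", Kubelet: "Stopped"}, // stopped worker taints the result
    }
    fmt.Println(exitCode(nodes)) // prints 3: both flags set by the stopped worker
}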
@@ -270,8 +273,15 @@ func statusText(st *Status, w io.Writer) error {
return nil
}
-func statusJSON(st *Status, w io.Writer) error {
-js, err := json.Marshal(st)
+func statusJSON(st []*Status, w io.Writer) error {
+var js []byte
+var err error
+// Keep backwards compat with single node clusters to not break anyone
+if len(st) == 1 {
+js, err = json.Marshal(st[0])
+} else {
+js, err = json.Marshal(st)
+}
if err != nil {
return err
}
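A small sketch of the backwards-compatibility branch added to statusJSON: a lone node still marshals to a bare JSON object, while two or more nodes marshal to an array. The trimmed Status struct and node names are illustrative only.

package main

import (
    "encoding/json"
    "fmt"
)

// Trimmed stand-in for the real Status struct.
type Status struct {
    Name string
    Host string
}

// marshalStatuses mirrors the compat branch: one node keeps the old
// single-object shape, multiple nodes become a JSON array.
func marshalStatuses(st []*Status) ([]byte, error) {
    if len(st) == 1 {
        return json.Marshal(st[0])
    }
    return json.Marshal(st)
}

func main() {
    one := []*Status{{Name: "minikube", Host: "Running"}}
    two := append(one, &Status{Name: "m02", Host: "Stopped"})

    a, _ := marshalStatuses(one)
    b, _ := marshalStatuses(two)
    fmt.Println(string(a)) // {"Name":"minikube","Host":"Running"}
    fmt.Println(string(b)) // [{"Name":"minikube","Host":"Running"},{"Name":"m02","Host":"Stopped"}]
}

That object-vs-array distinction is what the multinode integration tests below rely on when they unmarshal the status output into a []cmd.Status.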

View File

@@ -35,7 +35,7 @@ func TestExitCode(t *testing.T) {
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
-got := exitCode(tc.state)
+got := exitCode([]*Status{tc.state})
if got != tc.want {
t.Errorf("exitcode(%+v) = %d, want: %d", tc.state, got, tc.want)
}
@@ -93,7 +93,7 @@ func TestStatusJSON(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
var b bytes.Buffer
-err := statusJSON(tc.state, &b)
+err := statusJSON([]*Status{tc.state}, &b)
if err != nil {
t.Errorf("json(%+v) error: %v", tc.state, err)
}

View File

@@ -20,33 +20,54 @@ package integration
import (
"context"
"encoding/json"
"os/exec"
"strings"
"testing"
"k8s.io/minikube/cmd/minikube/cmd"
)
func TestMultiNode(t *testing.T) {
if NoneDriver() {
t.Skip("none driver does not support multinode")
}
MaybeParallel(t)
type validatorFunc func(context.Context, *testing.T, string)
profile := UniqueProfileName("multinode")
ctx, cancel := context.WithTimeout(context.Background(), Minutes(30))
defer CleanupWithLogs(t, profile, cancel)
-startArgs := append([]string{"start", "-p", profile, "--wait=true"}, StartArgs()...)
t.Run("serial", func(t *testing.T) {
tests := []struct {
name string
validator validatorFunc
}{
{"StartWithParam", validateStart},
{"AddNode", validateAddNode},
{"StopNode", validateStopNode},
{"StartNode", validateStartNode},
{"DeleteNode", validateDeleteNode},
}
for _, tc := range tests {
tc := tc
t.Run(tc.name, func(t *testing.T) {
tc.validator(ctx, t, profile)
})
}
})
}
func validateStart(ctx context.Context, t *testing.T, profile string) {
// Start a 2 node cluster with the --nodes param
startArgs := append([]string{"start", "-p", profile, "--wait=true", "--nodes=2"}, StartArgs()...)
rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
if err != nil {
t.Fatalf("failed to start cluster. args %q : %v", rr.Command(), err)
}
-// Add a node to the current cluster
-addArgs := []string{"node", "add", "-p", profile, "-v", "3", "--alsologtostderr"}
-rr, err = Run(t, exec.CommandContext(ctx, Target(), addArgs...))
-if err != nil {
-t.Fatalf("failed to add node to current cluster. args %q : %v", rr.Command(), err)
-}
// Make sure minikube status shows 2 nodes
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
if err != nil {
t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
@@ -61,3 +82,140 @@ func TestMultiNode(t *testing.T) {
}
}
func validateAddNode(ctx context.Context, t *testing.T, profile string) {
// Add a node to the current cluster
addArgs := []string{"node", "add", "-p", profile, "-v", "3", "--alsologtostderr"}
rr, err := Run(t, exec.CommandContext(ctx, Target(), addArgs...))
if err != nil {
t.Fatalf("failed to add node to current cluster. args %q : %v", rr.Command(), err)
}
// Make sure minikube status shows 3 nodes
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
if err != nil {
t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
}
if strings.Count(rr.Stdout.String(), "host: Running") != 3 {
t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String())
}
if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 {
t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String())
}
}
func validateStopNode(ctx context.Context, t *testing.T, profile string) {
// Grab a worker node name
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--output", "json"))
if err != nil {
t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
}
sts := []cmd.Status{}
err = json.Unmarshal(rr.Stdout.Bytes(), &sts)
if err != nil {
t.Fatalf("failed to decode json from status: args %q: %v", rr.Command(), err)
}
if len(sts) != 3 {
t.Fatalf("status has the incorrect number of nodes: args %q: %v", rr.Command(), rr.Stdout.String())
}
// Names are autogenerated using the node.Name() function
name := "m02"
// Run minikube node stop on that node
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "stop", name))
if err != nil {
t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err)
}
// Run status again to see the stopped host
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--output", "json"))
if err != nil {
t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
}
// Re-decode the status output so we count node states from after the stop
if err := json.Unmarshal(rr.Stdout.Bytes(), &sts); err != nil {
t.Fatalf("failed to decode json from status: args %q: %v", rr.Command(), err)
}
stopped := 0
for _, st := range sts {
if st.Host == "Stopped" {
stopped++
}
}
if stopped == 0 {
t.Errorf("no nodes were stopped: %v", rr.Stdout.String())
} else if stopped > 1 {
t.Errorf("too many nodes were stopped: %v", rr.Stdout.String())
}
}
func validateStartNode(ctx context.Context, t *testing.T, profile string) {
// Grab the stopped node
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--output", "json"))
if err != nil {
t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
}
sts := []cmd.Status{}
err = json.Unmarshal(rr.Stdout.Bytes(), &sts)
if err != nil {
t.Fatalf("failed to decode json from status: args %q: %v", rr.Command(), err)
}
if len(sts) != 3 {
t.Fatalf("status has the incorrect number of nodes: args %q: %v", rr.Command(), rr.Stdout.String())
}
var name string
for _, st := range sts {
if st.Host == "Stopped" {
name = st.Name
}
}
if name == "" {
t.Fatalf("Could not find stopped node")
}
// Start the node back up
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", name))
if err != nil {
t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err)
}
if strings.Count(rr.Stdout.String(), "host: Running") != 3 {
t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String())
}
if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 {
t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String())
}
}
func validateDeleteNode(ctx context.Context, t *testing.T, profile string) {
// Grab a worker node name
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--output", "json"))
if err != nil {
t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
}
sts := []cmd.Status{}
err = json.Unmarshal(rr.Stdout.Bytes(), &sts)
if err != nil {
t.Fatalf("failed to decode json from status: args %q: %v", rr.Command(), err)
}
if len(sts) != 3 {
t.Fatalf("status has the incorrect number of nodes: args %q: %v", rr.Command(), rr.Stdout.String())
}
name := "m02"
// Delete the node
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "delete", name))
if err != nil {
t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err)
}
if strings.Count(rr.Stdout.String(), "host: Running") != 2 {
t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String())
}
if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 {
t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String())
}
}
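The stop, start, and delete validators above all begin the same way: run minikube status --output json and decode the output into a []cmd.Status, one entry per node. A hedged sketch of that shared step as a helper in the same test package; the helper name and error text are illustrative, not part of this change, and it assumes the file's existing imports plus the Run and Target helpers already used here.

// getStatuses is a hypothetical helper capturing the step the validators
// above repeat: run `minikube status --output json` for the profile and
// decode the result into one Status per node.
func getStatuses(ctx context.Context, t *testing.T, profile string) []cmd.Status {
    t.Helper()
    rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--output", "json"))
    if err != nil {
        t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
    }
    var sts []cmd.Status
    if err := json.Unmarshal(rr.Stdout.Bytes(), &sts); err != nil {
        t.Fatalf("failed to decode json from status: args %q: %v", rr.Command(), err)
    }
    return sts
}

Each validator could then assert directly on the returned slice instead of repeating the Run and Unmarshal boilerplate.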