Merge pull request #7810 from sharifelgamal/multitest

add more detailed multinode tests

commit b4a6633fd1
@@ -75,7 +75,6 @@ var nodeStartCmd = &cobra.Command{
 }
 
 func init() {
-	nodeStartCmd.Flags().String("name", "", "The name of the node to start")
 	nodeStartCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
 	nodeCmd.AddCommand(nodeStartCmd)
 }
@@ -53,6 +53,5 @@ var nodeStopCmd = &cobra.Command{
 }
 
 func init() {
-	nodeStopCmd.Flags().String("name", "", "The name of the node to delete")
 	nodeCmd.AddCommand(nodeStopCmd)
 }
@@ -104,12 +104,11 @@ var statusCmd = &cobra.Command{
 		cname := ClusterFlagValue()
 		api, cc := mustload.Partial(cname)
 
-		var st *Status
-		var err error
+		var statuses []*Status
 		for _, n := range cc.Nodes {
 			glog.Infof("checking status of %s ...", n.Name)
 			machineName := driver.MachineName(*cc, n)
-			st, err = status(api, *cc, n)
+			st, err := status(api, *cc, n)
 			glog.Infof("%s status: %+v", machineName, st)
 
 			if err != nil {
@@ -118,36 +117,40 @@ var statusCmd = &cobra.Command{
 			if st.Host == Nonexistent {
 				glog.Errorf("The %q host does not exist!", machineName)
 			}
+			statuses = append(statuses, st)
+		}
 
-			switch strings.ToLower(output) {
-			case "text":
+		switch strings.ToLower(output) {
+		case "text":
+			for _, st := range statuses {
 				if err := statusText(st, os.Stdout); err != nil {
 					exit.WithError("status text failure", err)
 				}
-			case "json":
-				if err := statusJSON(st, os.Stdout); err != nil {
-					exit.WithError("status json failure", err)
-				}
-			default:
-				exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output))
 			}
+		case "json":
+			if err := statusJSON(statuses, os.Stdout); err != nil {
+				exit.WithError("status json failure", err)
+			}
+		default:
+			exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output))
 		}
 
-		// TODO: Update for multi-node
-		os.Exit(exitCode(st))
+		os.Exit(exitCode(statuses))
 	},
 }
 
-func exitCode(st *Status) int {
+func exitCode(statuses []*Status) int {
 	c := 0
-	if st.Host != state.Running.String() {
-		c |= minikubeNotRunningStatusFlag
-	}
-	if (st.APIServer != state.Running.String() && st.APIServer != Irrelevant) || st.Kubelet != state.Running.String() {
-		c |= clusterNotRunningStatusFlag
-	}
-	if st.Kubeconfig != Configured && st.Kubeconfig != Irrelevant {
-		c |= k8sNotRunningStatusFlag
+	for _, st := range statuses {
+		if st.Host != state.Running.String() {
+			c |= minikubeNotRunningStatusFlag
+		}
+		if (st.APIServer != state.Running.String() && st.APIServer != Irrelevant) || st.Kubelet != state.Running.String() {
+			c |= clusterNotRunningStatusFlag
+		}
+		if st.Kubeconfig != Configured && st.Kubeconfig != Irrelevant {
+			c |= k8sNotRunningStatusFlag
+		}
 	}
 	return c
 }
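The exit code is now an aggregate across nodes: each node's problems OR bits into one value, so any single broken node shows up in the final code. Below is a minimal standalone sketch of that aggregation, not minikube's code; the flag values, the trimmed-down Status struct, and the state strings are assumptions for illustration, and the real function also special-cases Irrelevant and Nonexistent states.

```go
package main

import "fmt"

// Illustrative bit values only; minikube defines its own constants.
const (
	minikubeNotRunningStatusFlag = 1 << 0
	clusterNotRunningStatusFlag  = 1 << 1
	k8sNotRunningStatusFlag      = 1 << 2
)

// Stand-in for minikube's Status type, reduced to the fields used here.
type Status struct {
	Host, Kubelet, APIServer, Kubeconfig string
}

// exitCode mirrors the aggregation in the patch: every node's problems
// are OR-ed into one bitmask, so one stopped node is enough to set bits.
func exitCode(statuses []*Status) int {
	c := 0
	for _, st := range statuses {
		if st.Host != "Running" {
			c |= minikubeNotRunningStatusFlag
		}
		if st.APIServer != "Running" || st.Kubelet != "Running" {
			c |= clusterNotRunningStatusFlag
		}
		if st.Kubeconfig != "Configured" {
			c |= k8sNotRunningStatusFlag
		}
	}
	return c
}

func main() {
	running := &Status{Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: "Configured"}
	stopped := &Status{Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: "Stopped"}

	fmt.Println(exitCode([]*Status{running, running}))          // 0: all nodes healthy
	fmt.Println(exitCode([]*Status{running, running, stopped})) // 7: one stopped node sets all three bits
}
```

With these illustrative values a fully stopped node yields 7, which is the exit code the new StopNode integration test below treats as expected.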
@@ -270,8 +273,15 @@ func statusText(st *Status, w io.Writer) error {
 	return nil
 }
 
-func statusJSON(st *Status, w io.Writer) error {
-	js, err := json.Marshal(st)
+func statusJSON(st []*Status, w io.Writer) error {
+	var js []byte
+	var err error
+	// Keep backwards compat with single node clusters to not break anyone
+	if len(st) == 1 {
+		js, err = json.Marshal(st[0])
+	} else {
+		js, err = json.Marshal(st)
+	}
 	if err != nil {
 		return err
 	}
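The reworked statusJSON keeps single-node output backwards compatible: one node still marshals as a JSON object, while two or more marshal as an array. A self-contained sketch of that behaviour, using an illustrative Status stand-in rather than minikube's real type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in for minikube's Status; the field set here is illustrative.
type Status struct {
	Name, Host, Kubelet string
}

// statusJSON follows the shape of the patched helper: a one-node cluster
// still serializes as a single object, a multi-node cluster as an array.
func statusJSON(st []*Status) ([]byte, error) {
	if len(st) == 1 {
		return json.Marshal(st[0])
	}
	return json.Marshal(st)
}

func main() {
	one := []*Status{{Name: "m01", Host: "Running", Kubelet: "Running"}}
	two := []*Status{
		{Name: "m01", Host: "Running", Kubelet: "Running"},
		{Name: "m02", Host: "Stopped", Kubelet: "Stopped"},
	}

	a, _ := statusJSON(one)
	b, _ := statusJSON(two)
	fmt.Println(string(a)) // {"Name":"m01",...}                  object, as before the change
	fmt.Println(string(b)) // [{"Name":"m01",...},{"Name":"m02",...}]  array for multi-node
}
```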
@@ -35,7 +35,7 @@ func TestExitCode(t *testing.T) {
 	}
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
-			got := exitCode(tc.state)
+			got := exitCode([]*Status{tc.state})
 			if got != tc.want {
 				t.Errorf("exitcode(%+v) = %d, want: %d", tc.state, got, tc.want)
 			}
@@ -93,7 +93,7 @@ func TestStatusJSON(t *testing.T) {
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
 			var b bytes.Buffer
-			err := statusJSON(tc.state, &b)
+			err := statusJSON([]*Status{tc.state}, &b)
 			if err != nil {
 				t.Errorf("json(%+v) error: %v", tc.state, err)
 			}
@@ -155,7 +155,6 @@ minikube node start [flags]
 ```
       --delete-on-failure   If set, delete the current cluster if start fails and try again. Defaults to false.
   -h, --help                help for start
-      --name string         The name of the node to start
 ```
 
 ### Options inherited from parent commands
@@ -187,8 +186,7 @@ minikube node stop [flags]
 ### Options
 
 ```
-  -h, --help          help for stop
-      --name string   The name of the node to delete
+  -h, --help   help for stop
 ```
 
 ### Options inherited from parent commands
@@ -29,24 +29,154 @@ func TestMultiNode(t *testing.T) {
 	if NoneDriver() {
 		t.Skip("none driver does not support multinode")
 	}
 	MaybeParallel(t)
+
+	type validatorFunc func(context.Context, *testing.T, string)
 	profile := UniqueProfileName("multinode")
 	ctx, cancel := context.WithTimeout(context.Background(), Minutes(30))
 	defer CleanupWithLogs(t, profile, cancel)
 
-	startArgs := append([]string{"start", "-p", profile, "--wait=true"}, StartArgs()...)
+	t.Run("serial", func(t *testing.T) {
+		tests := []struct {
+			name      string
+			validator validatorFunc
+		}{
+			{"FreshStart2Nodes", validateMultiNodeStart},
+			{"AddNode", validateAddNodeToMultiNode},
+			{"StopNode", validateStopRunningNode},
+			{"StartAfterStop", validateStartNodeAfterStop},
+			{"DeleteNode", validateDeleteNodeFromMultiNode},
+		}
+		for _, tc := range tests {
+			tc := tc
+			t.Run(tc.name, func(t *testing.T) {
+				tc.validator(ctx, t, profile)
+			})
+		}
+	})
+}
+
+func validateMultiNodeStart(ctx context.Context, t *testing.T, profile string) {
+	// Start a 2 node cluster with the --nodes param
+	startArgs := append([]string{"start", "-p", profile, "--wait=true", "--nodes=2"}, StartArgs()...)
 	rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
 	if err != nil {
 		t.Fatalf("failed to start cluster. args %q : %v", rr.Command(), err)
 	}
 
-	// Add a node to the current cluster
-	addArgs := []string{"node", "add", "-p", profile, "-v", "3", "--alsologtostderr"}
-	rr, err = Run(t, exec.CommandContext(ctx, Target(), addArgs...))
-	if err != nil {
-		t.Fatalf("failed to add node to current cluster. args %q : %v", rr.Command(), err)
-	}
-
+	// Make sure minikube status shows 2 nodes
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
+	if err != nil {
+		t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
+	}
+
+	if strings.Count(rr.Stdout.String(), "host: Running") != 2 {
+		t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String())
+	}
+
+	if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 {
+		t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String())
+	}
+
+}
+
+func validateAddNodeToMultiNode(ctx context.Context, t *testing.T, profile string) {
+	// Add a node to the current cluster
+	addArgs := []string{"node", "add", "-p", profile, "-v", "3", "--alsologtostderr"}
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), addArgs...))
+	if err != nil {
+		t.Fatalf("failed to add node to current cluster. args %q : %v", rr.Command(), err)
+	}
+
+	// Make sure minikube status shows 3 nodes
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
+	if err != nil {
+		t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
+	}
+
+	if strings.Count(rr.Stdout.String(), "host: Running") != 3 {
+		t.Errorf("status says all hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String())
+	}
+
+	if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 {
+		t.Errorf("status says all kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String())
+	}
+}
+
+func validateStopRunningNode(ctx context.Context, t *testing.T, profile string) {
+	// Names are autogenerated using the node.Name() function
+	name := "m03"
+
+	// Run minikube node stop on that node
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "stop", name))
+	if err != nil {
+		t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err)
+	}
+
+	// Run status again to see the stopped host
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
+	// Exit code 7 means one host is stopped, which we are expecting
+	if err != nil && rr.ExitCode != 7 {
+		t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
+	}
+
+	// Make sure minikube status shows 2 running nodes and 1 stopped one
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
+	if err != nil && rr.ExitCode != 7 {
+		t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
+	}
+
+	if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 {
+		t.Errorf("incorrect number of running kubelets: args %q: %v", rr.Command(), rr.Stdout.String())
+	}
+
+	if strings.Count(rr.Stdout.String(), "host: Stopped") != 1 {
+		t.Errorf("incorrect number of stopped hosts: args %q: %v", rr.Command(), rr.Stdout.String())
+	}
+
+	if strings.Count(rr.Stdout.String(), "kubelet: Stopped") != 1 {
+		t.Errorf("incorrect number of stopped kubelets: args %q: %v", rr.Command(), rr.Stdout.String())
+	}
+}
+
+func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile string) {
+	// TODO (#7496): remove skip once restarts work
+	t.Skip("Restarting nodes is broken :(")
+
+	// Grab the stopped node
+	name := "m03"
+
+	// Start the node back up
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", name))
+	if err != nil {
+		t.Errorf("node start returned an error. args %q: %v", rr.Command(), err)
+	}
+
+	// Make sure minikube status shows 3 running hosts
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
+	if err != nil {
+		t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
+	}
+
+	if strings.Count(rr.Stdout.String(), "host: Running") != 3 {
+		t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String())
+	}
+
+	if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 {
+		t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String())
+	}
+}
+
+func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile string) {
+	name := "m03"
+
+	// Start the node back up
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "delete", name))
+	if err != nil {
+		t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err)
+	}
+
+	// Make sure status is back down to 2 hosts
 	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
 	if err != nil {
 		t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
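The new TestMultiNode drives its validators through sequential t.Run subtests, re-binding the loop variable (tc := tc) before each closure so every subtest invokes its own entry. A minimal standalone sketch of that table-driven pattern follows; the names, the profile value, and the placeholder validators are hypothetical, not taken from the test above.

```go
package example

import (
	"context"
	"testing"
)

type validatorFunc func(context.Context, *testing.T, string)

// TestPattern is a stripped-down illustration of the structure used above:
// a named list of validators run in order as subtests, each receiving the
// shared context and profile name.
func TestPattern(t *testing.T) {
	ctx := context.Background()
	profile := "example-profile" // hypothetical; the real test uses UniqueProfileName

	tests := []struct {
		name      string
		validator validatorFunc
	}{
		{"StepOne", func(ctx context.Context, t *testing.T, profile string) { /* ... */ }},
		{"StepTwo", func(ctx context.Context, t *testing.T, profile string) { /* ... */ }},
	}
	for _, tc := range tests {
		tc := tc // copy the loop variable so the closure sees the right entry
		t.Run(tc.name, func(t *testing.T) {
			tc.validator(ctx, t, profile)
		})
	}
}
```

Because the subtests never call t.Parallel, they run in order, which matters here: later steps (StopNode, DeleteNode) depend on the cluster state left behind by earlier ones.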