Merge pull request #11731 from andriyDev/fix-multinode-restart

Move node config deletion out of drainNode and into Delete

commit 16d2f2a9d6
@@ -82,7 +82,7 @@ func Add(cc *config.ClusterConfig, n config.Node, delOnFail bool) error {
 // drainNode drains then deletes (removes) node from cluster.
 func drainNode(cc config.ClusterConfig, name string) (*config.Node, error) {
-    n, index, err := Retrieve(cc, name)
+    n, _, err := Retrieve(cc, name)
     if err != nil {
         return n, errors.Wrap(err, "retrieve")
     }

@@ -130,8 +130,7 @@ func drainNode(cc config.ClusterConfig, name string) (*config.Node, error) {
     }
     klog.Infof("successfully deleted node %q", name)

-    cc.Nodes = append(cc.Nodes[:index], cc.Nodes[index+1:]...)
-    return n, config.SaveProfile(viper.GetString(config.ProfileName), &cc)
+    return n, nil
 }

 // Delete calls drainNode to remove node from cluster and deletes the host.
@@ -152,7 +151,13 @@ func Delete(cc config.ClusterConfig, name string) (*config.Node, error) {
         return n, err
     }

-    return n, nil
+    _, index, err := Retrieve(cc, name)
+    if err != nil {
+        return n, errors.Wrap(err, "retrieve")
+    }
+
+    cc.Nodes = append(cc.Nodes[:index], cc.Nodes[index+1:]...)
+    return n, config.SaveProfile(viper.GetString(config.ProfileName), &cc)
 }

 // Retrieve finds the node by name in the given cluster
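To read the node.go change in one place, here is a minimal, self-contained Go sketch of the new division of labor. The names in it (Node, ClusterConfig, retrieve, deleteNode) and the log lines standing in for the drain, host-deletion, and SaveProfile steps are hypothetical stand-ins rather than minikube's real API; only the control flow mirrors the hunks above: drainNode stops touching cc.Nodes, and the Delete counterpart removes the config entry and persists the profile after the host is gone.

    package main

    import (
        "fmt"
        "log"
    )

    // Hypothetical stand-ins for config.Node and config.ClusterConfig.
    type Node struct{ Name string }

    type ClusterConfig struct{ Nodes []Node }

    // retrieve plays the role of Retrieve: find a node and its index by name.
    func retrieve(cc ClusterConfig, name string) (*Node, int, error) {
        for i := range cc.Nodes {
            if cc.Nodes[i].Name == name {
                return &cc.Nodes[i], i, nil
            }
        }
        return nil, -1, fmt.Errorf("node %q not found", name)
    }

    // drainNode now only drains and deletes the node from the running cluster;
    // it no longer removes the entry from the cluster config.
    func drainNode(cc ClusterConfig, name string) (*Node, error) {
        n, _, err := retrieve(cc, name)
        if err != nil {
            return n, fmt.Errorf("retrieve: %w", err)
        }
        log.Printf("draining and deleting node %q from the cluster", name) // stand-in for the kubectl drain/delete steps
        return n, nil
    }

    // deleteNode plays the role of Delete: drain, delete the host, and only then
    // drop the node from the config and save the profile.
    func deleteNode(cc *ClusterConfig, name string) (*Node, error) {
        n, err := drainNode(*cc, name)
        if err != nil {
            return n, err
        }
        log.Printf("deleting host for node %q", name) // stand-in for machine.DeleteHost
        _, index, err := retrieve(*cc, name)
        if err != nil {
            return n, fmt.Errorf("retrieve: %w", err)
        }
        cc.Nodes = append(cc.Nodes[:index], cc.Nodes[index+1:]...)
        log.Printf("saving profile with %d remaining node(s)", len(cc.Nodes)) // stand-in for config.SaveProfile
        return n, nil
    }

    func main() {
        cc := ClusterConfig{Nodes: []Node{{"m01"}, {"m02"}, {"m03"}}}
        if _, err := deleteNode(&cc, "m02"); err != nil {
            log.Fatal(err)
        }
        fmt.Println(cc.Nodes) // [{m01} {m03}]
    }

The new RestartKeepsNodes integration test below exercises the behavior this protects: the node list reported before and after a full stop/start cycle must be identical.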
@@ -256,6 +256,9 @@ tests the minikube node stop command
 #### validateStartNodeAfterStop
 tests the minikube node start command on an existing stopped node

+#### validateRestartKeepsNodes
+restarts minikube cluster and checks if the reported node list is unchanged
+
 #### validateStopMultiNodeCluster
 runs minikube stop on a multinode cluster

@@ -55,6 +55,7 @@ func TestMultiNode(t *testing.T) {
             {"CopyFile", validateCopyFileWithMultiNode},
             {"StopNode", validateStopRunningNode},
             {"StartAfterStop", validateStartNodeAfterStop},
+            {"RestartKeepsNodes", validateRestartKeepsNodes},
             {"DeleteNode", validateDeleteNodeFromMultiNode},
             {"StopMultiNode", validateStopMultiNodeCluster},
             {"RestartMultiNode", validateRestartMultiNodeCluster},
@@ -258,6 +259,36 @@ func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile string) {
     }
 }

+// validateRestartKeepsNodes restarts minikube cluster and checks if the reported node list is unchanged
+func validateRestartKeepsNodes(ctx context.Context, t *testing.T, profile string) {
+    rr, err := Run(t, exec.CommandContext(ctx, Target(), "node", "list", "-p", profile))
+    if err != nil {
+        t.Errorf("failed to run node list. args %q : %v", rr.Command(), err)
+    }
+
+    nodeList := rr.Stdout.String()
+
+    _, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile))
+    if err != nil {
+        t.Errorf("failed to run minikube stop. args %q : %v", rr.Command(), err)
+    }
+
+    _, err = Run(t, exec.CommandContext(ctx, Target(), "start", "-p", profile, "--wait=true", "-v=8", "--alsologtostderr"))
+    if err != nil {
+        t.Errorf("failed to run minikube start. args %q : %v", rr.Command(), err)
+    }
+
+    rr, err = Run(t, exec.CommandContext(ctx, Target(), "node", "list", "-p", profile))
+    if err != nil {
+        t.Errorf("failed to run node list. args %q : %v", rr.Command(), err)
+    }
+
+    restartedNodeList := rr.Stdout.String()
+    if nodeList != restartedNodeList {
+        t.Fatalf("reported node list is not the same after restart. Before restart: %s\nAfter restart: %s", nodeList, restartedNodeList)
+    }
+}
+
 // validateStopMultiNodeCluster runs minikube stop on a multinode cluster
 func validateStopMultiNodeCluster(ctx context.Context, t *testing.T, profile string) {
     // Run minikube stop on the cluster
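For local verification, the new case can typically be selected with Go's -run subtest filter; the path shown here (TestMultiNode/serial/RestartKeepsNodes) is an assumption about how TestMultiNode nests its t.Run calls, and the harness still needs a built minikube binary, since the test invokes it through Target():

    go test ./test/integration -run 'TestMultiNode/serial/RestartKeepsNodes' -timeout 30m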