check kubelet status explicitly

parent e3b7476f32
commit 2ec26658a6
@@ -627,6 +627,20 @@ func (k *Bootstrapper) restartWorker(cc config.ClusterConfig, n config.Node) error {
 	if err != nil {
 		return errors.Wrap(err, "running join phase kubelet-start")
 	}
 
+	// This can fail during upgrades if the old pods have not shut down yet
+	kubeletStatus := func() error {
+		st := kverify.KubeletStatus(k.c)
+		if st != state.Running {
+			return errors.New("kubelet not running")
+		}
+		return nil
+	}
+	if err = retry.Expo(kubeletStatus, 100*time.Microsecond, 30*time.Second); err != nil {
+		glog.Warningf("kubelet is not ready: %v", err)
+		return errors.Wrap(err, "kubelet")
+	}
+
 	return nil
 }
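The added check polls kubelet through minikube's retry.Expo helper, which retries a callback with exponentially growing pauses until it succeeds or the 30-second budget runs out. Below is a minimal, self-contained sketch of that pattern; expoRetry and checkKubelet are hypothetical stand-ins for retry.Expo and the kverify.KubeletStatus probe, not minikube's actual implementation:

package main

import (
	"errors"
	"fmt"
	"time"
)

// expoRetry is a hypothetical stand-in for minikube's retry.Expo helper:
// it keeps calling fn with exponentially growing pauses until fn succeeds
// or the total time budget maxTime is exhausted.
func expoRetry(fn func() error, initialDelay, maxTime time.Duration) error {
	deadline := time.Now().Add(maxTime)
	delay := initialDelay
	for {
		err := fn()
		if err == nil {
			return nil
		}
		if time.Now().Add(delay).After(deadline) {
			return fmt.Errorf("timed out after %v: %w", maxTime, err)
		}
		time.Sleep(delay)
		delay *= 2 // exponential backoff
	}
}

func main() {
	attempts := 0
	// checkKubelet stands in for the kverify.KubeletStatus probe; here it
	// "succeeds" on the third call, like a kubelet that needs a moment to start.
	checkKubelet := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("kubelet not running")
		}
		return nil
	}
	if err := expoRetry(checkKubelet, 100*time.Microsecond, 30*time.Second); err != nil {
		fmt.Println("kubelet never became ready:", err)
		return
	}
	fmt.Printf("kubelet became ready after %d attempts\n", attempts)
}

Note the 100*time.Microsecond initial interval from the diff: the first retries fire almost immediately, and the delay doubles on every failure until the 30-second deadline is reached.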
@@ -161,6 +161,12 @@ func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile string) {
 	if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 {
 		t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String())
 	}
+
+	// Make sure kubectl can connect correctly
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes"))
+	if err != nil {
+		t.Fatalf("failed to kubectl get nodes. args %q : %v", rr.Command(), err)
+	}
 }
 
 func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile string) {
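The integration test asserts that all three kubelets (control plane plus two workers) report Running by counting occurrences of "kubelet: Running" in the status output, then confirms the API server is reachable with kubectl get nodes. A toy illustration of the counting check, using made-up status output rather than a real minikube status run:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Made-up `minikube status` output for a three-node cluster; the real
	// test reads this from the command's stdout. One "kubelet: Running"
	// line should appear per node.
	stdout := `m01
host: Running
kubelet: Running
m02
host: Running
kubelet: Running
m03
host: Running
kubelet: Running
`
	if n := strings.Count(stdout, "kubelet: Running"); n != 3 {
		fmt.Printf("expected 3 running kubelets, got %d\n", n)
		return
	}
	fmt.Println("all three kubelets report Running")
}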