wait for ingress pod to be running

pull/8563/head
Medya Gh 2020-06-26 14:22:17 -07:00
parent 52f4e130b5
commit 66421a6492
2 changed files with 18 additions and 11 deletions


@@ -141,7 +141,7 @@ func enableOrDisableAddon(cc *config.ClusterConfig, name string, val string) err
}
// to match both ingress and ingress-dns addons
if strings.HasPrefix(name, "ingress") && enable && driver.IsKIC(cc.Driver) && runtime.GOOS != "linux" {
if strings.HasPrefix(name, "ingress") && enable && driver.IsKIC(cc.Driver) && runtime.GOOS == "linux" {
exit.UsageT(`Due to {{.driver_name}} networking limitations on {{.os_name}}, {{.addon_name}} addon is not supported for this driver.
Alternatively to use this addon you can use a vm-based driver:
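Since the check above keys off the addon name prefix (per the comment "to match both ingress and ingress-dns addons"), here is a tiny standalone sketch of how strings.HasPrefix covers both names; the list of addon names is illustrative only.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// "ingress" and "ingress-dns" share the "ingress" prefix, so one
	// HasPrefix check covers both; unrelated addons do not match.
	for _, name := range []string{"ingress", "ingress-dns", "registry"} {
		fmt.Printf("%-12s matches: %v\n", name, strings.HasPrefix(name, "ingress"))
	}
}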
@@ -324,10 +324,18 @@ func validateIngress(cc *config.ClusterConfig, name string, val string) error {
if err != nil {
return errors.Wrapf(err, "get kube-client to validate ingress addon: %s", name)
}
err = kapi.WaitForDeploymentToStabilize(client, "kube-system", "ingress-nginx-controller", time.Minute*3)
if err != nil {
return errors.Wrapf(err, "Failed verifying ingress addon deployment: %s", name)
}
// app.kubernetes.io/name: ingress-nginx
err = kapi.WaitForPods(client, "kube-system", "app.kubernetes.io/name=ingress-nginx", time.Minute*3)
if err != nil {
return errors.Wrapf(err, "Failed verifying ingress addon deployment: %s", name)
}
}
return nil
}
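For reference, a minimal standalone sketch of the readiness check the new validateIngress code performs, written directly against client-go (using the same context-free List signature the diff uses). The function and package names are hypothetical; minikube's own kapi helpers above are the real implementation.

package ingresscheck // hypothetical package name, for illustration only

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForIngressPods polls until at least one pod matching the ingress-nginx
// label exists in kube-system and every matching pod is Running or Succeeded,
// or the timeout expires.
func waitForIngressPods(client kubernetes.Interface, timeout time.Duration) error {
	selector := "app.kubernetes.io/name=ingress-nginx"
	check := func() (bool, error) {
		pods, err := client.CoreV1().Pods("kube-system").List(metav1.ListOptions{LabelSelector: selector})
		if err != nil {
			// Treat API errors as transient and keep polling.
			return false, nil
		}
		if len(pods.Items) == 0 {
			return false, nil
		}
		for _, pod := range pods.Items {
			if pod.Status.Phase != corev1.PodRunning && pod.Status.Phase != corev1.PodSucceeded {
				return false, nil
			}
		}
		return true, nil
	}
	if err := wait.PollImmediate(2*time.Second, timeout, check); err != nil {
		return fmt.Errorf("ingress pods not ready: %v", err)
	}
	return nil
}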


@@ -28,7 +28,6 @@ import (
apierr "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
@@ -70,21 +69,21 @@ func Client(context string) (*kubernetes.Clientset, error) {
return kubernetes.NewForConfig(c)
}
// WaitForPodsWithLabelRunning waits for all matching pods to become Running and at least one matching pod exists.
func WaitForPodsWithLabelRunning(c kubernetes.Interface, ns string, label labels.Selector, timeOut ...time.Duration) error {
// WaitForPods waits for all matching pods to become Running or finish successfully and at least one matching pod exists.
func WaitForPods(c kubernetes.Interface, ns string, selector string, timeOut ...time.Duration) error {
start := time.Now()
glog.Infof("Waiting for pod with label %q in ns %q ...", ns, label)
glog.Infof("Waiting for pod with label %q in ns %q ...", ns, selector)
lastKnownPodNumber := -1
f := func() (bool, error) {
listOpts := meta.ListOptions{LabelSelector: label.String()}
listOpts := meta.ListOptions{LabelSelector: selector}
pods, err := c.CoreV1().Pods(ns).List(listOpts)
if err != nil {
glog.Infof("temporary error: getting Pods with label selector %q : [%v]\n", label.String(), err)
glog.Infof("temporary error: getting Pods with label selector %q : [%v]\n", selector, err)
return false, nil
}
if lastKnownPodNumber != len(pods.Items) {
glog.Infof("Found %d Pods for label selector %s\n", len(pods.Items), label.String())
glog.Infof("Found %d Pods for label selector %s\n", len(pods.Items), selector)
lastKnownPodNumber = len(pods.Items)
}
@@ -93,8 +92,8 @@ func WaitForPodsWithLabelRunning(c kubernetes.Interface, ns string, label labels
}
for _, pod := range pods.Items {
if pod.Status.Phase != core.PodRunning {
glog.Infof("waiting for pod %q, current state: %s: [%v]\n", label.String(), pod.Status.Phase, err)
if pod.Status.Phase != core.PodRunning && pod.Status.Phase != core.PodSucceeded {
glog.Infof("waiting for pod %q, current state: %s: [%v]\n", selector, pod.Status.Phase, err)
return false, nil
}
}
@@ -106,7 +105,7 @@ func WaitForPodsWithLabelRunning(c kubernetes.Interface, ns string, label labels
t = timeOut[0]
}
err := wait.PollImmediate(kconst.APICallRetryInterval, t, f)
glog.Infof("duration metric: took %s to wait for %s ...", time.Since(start), label)
glog.Infof("duration metric: took %s to wait for %s ...", time.Since(start), selector)
return err
}
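As a side note on the signature, the variadic timeOut parameter makes the timeout optional. A small self-contained sketch of that idiom follows; defaultTimeout is an assumed placeholder, since the real default is defined elsewhere in the kapi package and is not part of this diff.

package main

import (
	"fmt"
	"time"
)

const defaultTimeout = 8 * time.Minute // assumed value, for illustration only

// waitTimeout returns the caller-supplied timeout if one was passed,
// otherwise the package default.
func waitTimeout(timeOut ...time.Duration) time.Duration {
	t := defaultTimeout
	if len(timeOut) > 0 {
		t = timeOut[0]
	}
	return t
}

func main() {
	fmt.Println(waitTimeout())                // prints the default: 8m0s
	fmt.Println(waitTimeout(3 * time.Minute)) // prints the override: 3m0s
}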