kube-proxy/WaitCluster fix: don't bootstrap from a stale context
parent 71f0ae9bc2
commit e3d23d89f1
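The core of this change is resolving client-go configuration for an explicitly named kubectl context instead of whatever current-context a possibly stale ~/.kube/config selects. The following standalone sketch is not part of the commit; it only assumes client-go's clientcmd, rest, and kubernetes packages, and the "minikube" context name is illustrative.

package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// clientConfigFor loads the default kubeconfig (KUBECONFIG or ~/.kube/config)
// but pins the context name, so a stale current-context cannot leak in.
func clientConfigFor(context string) (*rest.Config, error) {
	loader := clientcmd.NewDefaultClientConfigLoadingRules()
	overrides := &clientcmd.ConfigOverrides{CurrentContext: context}
	return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loader, overrides).ClientConfig()
}

func main() {
	cfg, err := clientConfigFor("minikube")
	if err != nil {
		fmt.Println("client config:", err)
		return
	}
	// cfg.Host can still point at an old IP if the kubeconfig entry itself is
	// stale; the WaitCluster hunk below compares it against the node endpoint.
	fmt.Println("apiserver endpoint:", cfg.Host)
	if _, err := kubernetes.NewForConfig(cfg); err != nil {
		fmt.Println("client:", err)
	}
}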
@@ -328,15 +328,15 @@ func runStart(cmd *cobra.Command, args []string) {
 	showVersionInfo(k8sVersion, cr)
 	waitCacheImages(&cacheGroup)
 
-	// setup kube adm and certs and return bootstrapperx
-	bs := setupKubeAdm(machineAPI, config.KubernetesConfig)
-
-	// The kube config must be update must come before bootstrapping, otherwise health checks may use a stale IP
+	// Must be written before bootstrap, otherwise health checks may flake due to stale IP
 	kubeconfig, err := setupKubeconfig(host, &config)
 	if err != nil {
 		exit.WithError("Failed to setup kubeconfig", err)
 	}
 
+	// setup kubeadm (must come after setupKubeconfig)
+	bs := setupKubeAdm(machineAPI, config.KubernetesConfig)
+
 	// pull images or restart cluster
 	bootstrapCluster(bs, cr, mRunner, config.KubernetesConfig, preExists, isUpgrade)
 	configureMounts()

@@ -22,7 +22,6 @@ import (
 	"time"
 
 	"github.com/golang/glog"
-	"github.com/pkg/errors"
 	apps "k8s.io/api/apps/v1"
 	core "k8s.io/api/core/v1"
 	apierr "k8s.io/apimachinery/pkg/api/errors"
@@ -33,6 +32,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	watchtools "k8s.io/client-go/tools/watch"
 	kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
@@ -46,21 +46,26 @@ var (
 	ReasonableStartTime = time.Minute * 5
 )
 
-// Client gets the kubernetes client from default kubeconfig
-func Client(kubectlContext string) (kubernetes.Interface, error) {
-	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
-	configOverrides := &clientcmd.ConfigOverrides{}
-	kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
-	config, err := kubeConfig.ClientConfig()
+// ClientConfig returns the client configuration for a kubectl context
+func ClientConfig(context string) (*rest.Config, error) {
+	loader := clientcmd.NewDefaultClientConfigLoadingRules()
+	cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loader, &clientcmd.ConfigOverrides{CurrentContext: context})
+	c, err := cc.ClientConfig()
 	if err != nil {
-		return nil, fmt.Errorf("error creating kubeConfig: %v", err)
+		return nil, fmt.Errorf("client config: %v", err)
 	}
-	config = proxy.UpdateTransport(config)
-	client, err := kubernetes.NewForConfig(config)
+	c = proxy.UpdateTransport(c)
+	glog.V(1).Infof("client config for %s: %+v", context, c)
+	return c, nil
+}
+
+// Client gets the kubernetes client for a kubectl context name
+func Client(context string) (*kubernetes.Clientset, error) {
+	c, err := ClientConfig(context)
 	if err != nil {
-		return nil, errors.Wrap(err, "error creating new client from kubeConfig.ClientConfig()")
+		return nil, err
 	}
-	return client, nil
+	return kubernetes.NewForConfig(c)
 }
 
 // WaitForPodsWithLabelRunning waits for all matching pods to become Running and at least one matching pod exists.

@@ -38,6 +38,7 @@ import (
 	"golang.org/x/sync/errgroup"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/kubernetes"
 	kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
 	"k8s.io/minikube/pkg/kapi"
 	"k8s.io/minikube/pkg/minikube/assets"
@@ -339,10 +340,6 @@ func (k *Bootstrapper) WaitCluster(k8s config.KubernetesConfig, timeout time.Dur
 	// up. Otherwise, minikube won't start, as "k8s-app" pods are not ready.
 	componentsOnly := k8s.NetworkPlugin == "cni"
 	out.T(out.WaitingPods, "Waiting for:")
-	client, err := kapi.Client(k8s.NodeName)
-	if err != nil {
-		return errors.Wrap(err, "k8s client")
-	}
 
 	// Wait until the apiserver can answer queries properly. We don't care if the apiserver
 	// pod shows up as registered, but need the webserver for all subsequent queries.
@@ -351,6 +348,18 @@ func (k *Bootstrapper) WaitCluster(k8s config.KubernetesConfig, timeout time.Dur
 		return errors.Wrap(err, "waiting for apiserver")
 	}
 
+	// Catch case if WaitCluster was called with a stale ~/.kube/config
+	config, err := kapi.ClientConfig(k8s.NodeName)
+	endpoint := fmt.Sprintf("https://%s:%d", k8s.NodeIP, k8s.NodePort)
+	if config.Host != endpoint {
+		glog.Errorf("Overriding stale ClientConfig host %s with %s", config.Host, endpoint)
+		config.Host = endpoint
+	}
+
+	client, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		return errors.Wrap(err, "k8s client")
+	}
 	for _, p := range PodsByLayer {
 		if componentsOnly && p.key != "component" { // skip component check if network plugin is cni
 			continue
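For reference, the stale-endpoint guard added to WaitCluster can be read in isolation as follows. This is a standalone sketch rather than code from the commit; the helper name, sample addresses, and port are invented for illustration.

package main

import (
	"fmt"

	"github.com/golang/glog"
	"k8s.io/client-go/rest"
)

// ensureEndpoint mirrors the guard added in WaitCluster: if the resolved
// config still carries a stale apiserver host, override it with the node's
// real endpoint before any health checks run against the wrong address.
func ensureEndpoint(cfg *rest.Config, nodeIP string, nodePort int) {
	endpoint := fmt.Sprintf("https://%s:%d", nodeIP, nodePort)
	if cfg.Host != endpoint {
		glog.Errorf("Overriding stale ClientConfig host %s with %s", cfg.Host, endpoint)
		cfg.Host = endpoint
	}
}

func main() {
	cfg := &rest.Config{Host: "https://192.168.99.100:8443"} // stale entry from disk
	ensureEndpoint(cfg, "192.168.99.101", 8443)              // node's current address
	fmt.Println(cfg.Host)
}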