Merge branch 'master' into iso-release-v1.20.0-beta.0

commit 0c641ae0ed
@@ -249,6 +249,12 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
 	var cc config.ClusterConfig
 	if existing != nil {
 		cc = updateExistingConfigFromFlags(cmd, existing)
+
+		// identify appropriate cni then configure cruntime accordingly
+		_, err := cni.New(&cc)
+		if err != nil {
+			return cc, config.Node{}, errors.Wrap(err, "cni")
+		}
 	} else {
 		klog.Info("no existing cluster config was found, will generate one from the flags ")
 		cc = generateNewConfigFromFlags(cmd, k8sVersion, drvName)

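Note: per the new comment, cni.New is invoked here for validation and for its side effects on the existing config rather than for the returned manager (which is discarded). A minimal, self-contained sketch of that pattern, with hypothetical simplified types — `ClusterConfig` fields and `newCNI` below are stand-ins, not minikube's API:

```go
package main

import (
	"errors"
	"fmt"
)

// ClusterConfig is a stripped-down stand-in for config.ClusterConfig.
type ClusterConfig struct {
	Driver           string
	ContainerRuntime string
	NetworkPlugin    string
}

// newCNI mimics the call shape of cni.New: validate the config and
// normalise runtime-related fields as a side effect, returning an error
// that the caller wraps and propagates.
func newCNI(cc *ClusterConfig) error {
	if cc.Driver == "" {
		return errors.New("no driver configured")
	}
	if cc.NetworkPlugin == "" {
		cc.NetworkPlugin = "cni" // side effect the caller relies on
	}
	return nil
}

func main() {
	cc := ClusterConfig{Driver: "docker", ContainerRuntime: "containerd"}
	if err := newCNI(&cc); err != nil {
		fmt.Println("cni:", err)
		return
	}
	fmt.Println("network plugin:", cc.NetworkPlugin)
}
```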
@@ -92,6 +92,7 @@ func waitPodCondition(cs *kubernetes.Clientset, name, namespace string, conditio
 			klog.Info(reason)
 			return true, nil
 		}
+		// return immediately: status == core.ConditionUnknown
 		if status == core.ConditionUnknown {
 			klog.Info(reason)
 			return false, fmt.Errorf(reason)
@@ -101,6 +102,7 @@ func waitPodCondition(cs *kubernetes.Clientset, name, namespace string, conditio
 			klog.Info(reason)
 			lap = time.Now()
 		}
+		// return immediately: status == core.ConditionFalse
 		return false, nil
 	}
 	if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, checkCondition); err != nil {

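Note: the two comments added above document the standard `wait.ConditionFunc` contract that `checkCondition` follows — return `(true, nil)` to stop successfully, `(false, err)` to abort immediately, `(false, nil)` to keep polling. A self-contained illustration of that contract (assumes k8s.io/apimachinery is on the module path; intervals are made up):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	checkCondition := func() (bool, error) {
		attempts++
		if attempts >= 3 {
			return true, nil // condition met: stop polling
		}
		return false, nil // not yet: poll again after the interval
	}
	if err := wait.PollImmediate(100*time.Millisecond, 5*time.Second, checkCondition); err != nil {
		fmt.Println("condition never met:", err)
		return
	}
	fmt.Printf("condition met after %d attempts\n", attempts)
}
```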
@@ -114,13 +116,13 @@ func waitPodCondition(cs *kubernetes.Clientset, name, namespace string, conditio
 func podConditionStatus(cs *kubernetes.Clientset, name, namespace string, condition core.PodConditionType) (status core.ConditionStatus, reason string) {
 	pod, err := cs.CoreV1().Pods(namespace).Get(context.Background(), name, meta.GetOptions{})
 	if err != nil {
-		return core.ConditionUnknown, fmt.Sprintf("error getting pod %q in %q namespace: %v", name, namespace, err)
+		return core.ConditionUnknown, fmt.Sprintf("error getting pod %q in %q namespace (skipping!): %v", name, namespace, err)
 	}
 
 	// check if underlying node is Ready - in case we got stale data about the pod
 	if pod.Spec.NodeName != "" {
 		if status, reason := nodeConditionStatus(cs, pod.Spec.NodeName, core.NodeReady); status != core.ConditionTrue {
-			return core.ConditionUnknown, fmt.Sprintf("node %q hosting pod %q in %q namespace is currently not %q: %v", pod.Spec.NodeName, name, namespace, core.NodeReady, reason)
+			return core.ConditionUnknown, fmt.Sprintf("node %q hosting pod %q in %q namespace is currently not %q (skipping!): %v", pod.Spec.NodeName, name, namespace, core.NodeReady, reason)
 		}
 	}

@@ -676,34 +676,6 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error {
 		}
 	}
 
-	if cfg.VerifyComponents[kverify.ExtraKey] {
-		// after kubelet is restarted (with 'kubeadm init phase kubelet-start' above),
-		// it appears to be immediately Ready, as do all kube-system pods (last observed state),
-		// then (after ~10sec) it realises it has some changes to apply, implying also pod restarts,
-		// and by that time we would exit completely, so we wait until kubelet begins restarting pods
-		klog.Info("waiting for restarted kubelet to initialise ...")
-		start := time.Now()
-		wait := func() error {
-			pods, err := client.CoreV1().Pods(meta.NamespaceSystem).List(context.Background(), meta.ListOptions{LabelSelector: "tier=control-plane"})
-			if err != nil {
-				return err
-			}
-			for _, pod := range pods.Items {
-				if ready, _ := kverify.IsPodReady(&pod); !ready {
-					return nil
-				}
-			}
-			return fmt.Errorf("kubelet not initialised")
-		}
-		_ = retry.Expo(wait, 250*time.Millisecond, 1*time.Minute)
-		klog.Infof("kubelet initialised")
-		klog.Infof("duration metric: took %s waiting for restarted kubelet to initialise ...", time.Since(start))
-
-		if err := kverify.WaitExtra(client, kverify.CorePodsLabels, kconst.DefaultControlPlaneTimeout); err != nil {
-			return errors.Wrap(err, "extra")
-		}
-	}
-
 	cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c})
 	if err != nil {
 		return errors.Wrap(err, "runtime")
@@ -741,6 +713,35 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error {
 		return errors.Wrap(err, "addons")
 	}
 
+	// must be called after applyCNI and `kubeadm phase addon all` (ie, coredns redeploy)
+	if cfg.VerifyComponents[kverify.ExtraKey] {
+		// after kubelet is restarted (with 'kubeadm init phase kubelet-start' above),
+		// it appears to be immediately Ready, as do all kube-system pods (last observed state),
+		// then (after ~10sec) it realises it has some changes to apply, implying also pod restarts,
+		// and by that time we would exit completely, so we wait until kubelet begins restarting pods
+		klog.Info("waiting for restarted kubelet to initialise ...")
+		start := time.Now()
+		wait := func() error {
+			pods, err := client.CoreV1().Pods(meta.NamespaceSystem).List(context.Background(), meta.ListOptions{LabelSelector: "tier=control-plane"})
+			if err != nil {
+				return err
+			}
+			for _, pod := range pods.Items {
+				if ready, _ := kverify.IsPodReady(&pod); !ready {
+					return nil
+				}
+			}
+			return fmt.Errorf("kubelet not initialised")
+		}
+		_ = retry.Expo(wait, 250*time.Millisecond, 1*time.Minute)
+		klog.Infof("kubelet initialised")
+		klog.Infof("duration metric: took %s waiting for restarted kubelet to initialise ...", time.Since(start))
+
+		if err := kverify.WaitExtra(client, kverify.CorePodsLabels, kconst.DefaultControlPlaneTimeout); err != nil {
+			return errors.Wrap(err, "extra")
+		}
+	}
+
 	if err := bsutil.AdjustResourceLimits(k.c); err != nil {
 		klog.Warningf("unable to adjust resource limits: %v", err)
 	}

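Note: the `wait` probe above is deliberately inverted — it succeeds (returns nil) once some control-plane pod is observed NOT ready, which signals the restarted kubelet has begun re-creating pods; any other outcome is retried with exponential backoff. A sketch of that pattern without minikube's `retry` package (pod listing faked, durations illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// retryExpo is a hypothetical stand-in for retry.Expo: call f with
// exponentially growing delays until it succeeds or max time elapses.
func retryExpo(f func() error, initial, max time.Duration) error {
	delay := initial
	deadline := time.Now().Add(max)
	for {
		err := f()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out: %w", err)
		}
		time.Sleep(delay)
		delay *= 2 // exponential backoff
	}
}

func main() {
	calls := 0
	probe := func() error {
		calls++
		if calls >= 3 { // pretend a pod finally reports NotReady
			return nil
		}
		return fmt.Errorf("kubelet not initialised")
	}
	_ = retryExpo(probe, 250*time.Millisecond, time.Minute)
	fmt.Println("kubelet began restarting pods after", calls, "probes")
}
```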
@@ -776,9 +777,17 @@ func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) {
 	joinCmd := r.Stdout.String()
 	joinCmd = strings.Replace(joinCmd, "kubeadm", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), 1)
 	joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(joinCmd))
-	if cc.KubernetesConfig.CRISocket != "" {
-		joinCmd = fmt.Sprintf("%s --cri-socket %s", joinCmd, cc.KubernetesConfig.CRISocket)
-	}
+
+	// avoid "Found multiple CRI sockets, please use --cri-socket to select one: /var/run/dockershim.sock, /var/run/crio/crio.sock" error
+	cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: k.c, Socket: cc.KubernetesConfig.CRISocket})
+	if err != nil {
+		klog.Errorf("cruntime: %v", err)
+	}
+	sp := cr.SocketPath()
+	if sp == "" {
+		sp = kconst.DefaultDockerCRISocket
+	}
+	joinCmd = fmt.Sprintf("%s --cri-socket %s", joinCmd, sp)
 
 	return joinCmd, nil
 }

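Note: the join command now always carries `--cri-socket`, resolved from the configured runtime and falling back to kubeadm's default Docker socket, so kubeadm never has to guess between multiple sockets. A self-contained sketch of the assembly (the socket lookup is stubbed; `kconst.DefaultDockerCRISocket` is assumed to be `/var/run/dockershim.sock`, as in kubeadm of that era):

```go
package main

import (
	"fmt"
	"strings"
)

const defaultDockerCRISocket = "/var/run/dockershim.sock" // assumed value

// buildJoinCmd mirrors the flag composition above; in the real code the
// socket comes from cr.SocketPath() on the selected container runtime.
func buildJoinCmd(raw, socketPath string) string {
	joinCmd := fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(raw))
	if socketPath == "" {
		socketPath = defaultDockerCRISocket // fallback avoids the multi-socket error
	}
	return fmt.Sprintf("%s --cri-socket %s", joinCmd, socketPath)
}

func main() {
	fmt.Println(buildJoinCmd("kubeadm join 192.168.49.2:8443 --token abc.def", ""))
}
```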
@@ -143,15 +143,6 @@ func chooseDefault(cc config.ClusterConfig) Manager {
 		return Bridge{}
 	}
 
-	if cc.KubernetesConfig.ContainerRuntime != "docker" {
-		if driver.IsKIC(cc.Driver) {
-			klog.Infof("%q driver + %s runtime found, recommending kindnet", cc.Driver, cc.KubernetesConfig.ContainerRuntime)
-			return KindNet{cc: cc}
-		}
-		klog.Infof("%q driver + %s runtime found, recommending bridge", cc.Driver, cc.KubernetesConfig.ContainerRuntime)
-		return Bridge{cc: cc}
-	}
-
 	if driver.BareMetal(cc.Driver) {
 		klog.Infof("Driver %s used, CNI unnecessary in this configuration, recommending no CNI", cc.Driver)
 		return Disabled{cc: cc}
@@ -164,6 +155,15 @@ func chooseDefault(cc config.ClusterConfig) Manager {
 		return KindNet{cc: cc}
 	}
 
+	if cc.KubernetesConfig.ContainerRuntime != "docker" {
+		if driver.IsKIC(cc.Driver) {
+			klog.Infof("%q driver + %s runtime found, recommending kindnet", cc.Driver, cc.KubernetesConfig.ContainerRuntime)
+			return KindNet{cc: cc}
+		}
+		klog.Infof("%q driver + %s runtime found, recommending bridge", cc.Driver, cc.KubernetesConfig.ContainerRuntime)
+		return Bridge{cc: cc}
+	}
+
 	klog.Infof("CNI unnecessary in this configuration, recommending no CNI")
 	return Disabled{cc: cc}
 }

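Note: these two hunks are a pure move — the non-docker-runtime branch now runs after the driver-level checks (the context lines suggest bare-metal and a multinode KindNet return precede it), so kindnet/bridge are recommended only when nothing driver-specific matched first. A hypothetical condensation of the resulting decision order (helper booleans stand in for the real driver/config checks):

```go
package main

import "fmt"

// recommendCNI condenses chooseDefault after the move: driver-level checks
// run first, and the container-runtime check is the last resort before
// "no CNI".
func recommendCNI(bareMetal, kic, multinode bool, runtime string) string {
	if bareMetal {
		return "disabled" // CNI unnecessary in this configuration
	}
	if multinode {
		return "kindnet"
	}
	if runtime != "docker" {
		if kic {
			return "kindnet" // KIC driver + non-docker runtime
		}
		return "bridge"
	}
	return "disabled"
}

func main() {
	fmt.Println(recommendCNI(false, true, false, "containerd")) // kindnet
	fmt.Println(recommendCNI(false, false, false, "docker"))    // disabled
}
```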
@@ -486,6 +486,7 @@ func (f *FakeRunner) systemctl(args []string, root bool) (string, error) { // no
 	}
 
 	for _, svc := range svcs {
+		svc = strings.Replace(svc, ".service", "", 1)
 		state, ok := f.services[svc]
 		if !ok {
 			return out, fmt.Errorf("unknown fake service: %s", svc)
@@ -526,6 +527,11 @@ func (f *FakeRunner) systemctl(args []string, root bool) (string, error) { // no
 			return out, nil
 		}
 		return out, fmt.Errorf("%s cat unimplemented", svc)
+	case "enable":
+	case "disable":
+	case "mask":
+	case "unmask":
+		f.t.Logf("fake systemctl: %s %s: %v", svc, action, state)
 	default:
 		return out, fmt.Errorf("unimplemented fake action: %q", action)
 	}
@@ -587,7 +593,8 @@ func TestDisable(t *testing.T) {
 		runtime string
 		want    []string
 	}{
-		{"docker", []string{"sudo", "systemctl", "stop", "-f", "docker.socket", "sudo", "systemctl", "stop", "-f", "docker"}},
+		{"docker", []string{"sudo", "systemctl", "stop", "-f", "docker.socket", "sudo", "systemctl", "stop", "-f", "docker.service",
+			"sudo", "systemctl", "disable", "docker.socket", "sudo", "systemctl", "mask", "docker.service"}},
 		{"crio", []string{"sudo", "systemctl", "stop", "-f", "crio"}},
 		{"containerd", []string{"sudo", "systemctl", "stop", "-f", "containerd"}},
 	}

@@ -120,6 +120,14 @@ func (r *Docker) Enable(disOthers, forceSystemd bool) error {
 		return err
 	}
 
+	if err := r.Init.Unmask("docker.service"); err != nil {
+		return err
+	}
+
+	if err := r.Init.Enable("docker.socket"); err != nil {
+		klog.ErrorS(err, "Failed to enable", "service", "docker.socket")
+	}
+
 	if forceSystemd {
 		if err := r.forceSystemd(); err != nil {
 			return err
@@ -146,7 +154,14 @@ func (r *Docker) Disable() error {
 	if err := r.Init.ForceStop("docker.socket"); err != nil {
 		klog.ErrorS(err, "Failed to stop", "service", "docker.socket")
 	}
-	return r.Init.ForceStop("docker")
+	if err := r.Init.ForceStop("docker.service"); err != nil {
+		klog.ErrorS(err, "Failed to stop", "service", "docker.service")
+		return err
+	}
+	if err := r.Init.Disable("docker.socket"); err != nil {
+		klog.ErrorS(err, "Failed to disable", "service", "docker.socket")
+	}
+	return r.Init.Mask("docker.service")
 }
 
 // ImageExists checks if an image exists

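Note: Disable now stops both the socket and the service, then disables the socket and masks the service so nothing re-activates Docker behind minikube's back — which is also why the Enable path above must Unmask first. A sketch using a hypothetical recording runner (a stand-in for the test's FakeRunner) that prints the sequence TestDisable now expects:

```go
package main

import (
	"fmt"
	"strings"
)

// recorder records the command lines the sysinit layer would execute.
type recorder struct{ cmds []string }

func (r *recorder) run(args ...string) error {
	r.cmds = append(r.cmds, strings.Join(args, " "))
	return nil
}

// disableDocker mirrors the sequence Disable now issues: stop the socket,
// stop the service, disable the socket, then mask the service.
func disableDocker(r *recorder) error {
	_ = r.run("sudo", "systemctl", "stop", "-f", "docker.socket")
	if err := r.run("sudo", "systemctl", "stop", "-f", "docker.service"); err != nil {
		return err
	}
	_ = r.run("sudo", "systemctl", "disable", "docker.socket")
	return r.run("sudo", "systemctl", "mask", "docker.service")
}

func main() {
	r := &recorder{}
	_ = disableDocker(r)
	for _, c := range r.cmds {
		fmt.Println(c)
	}
}
```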
@@ -122,6 +122,11 @@ func (s *OpenRC) DisableNow(svc string) error {
 	return fmt.Errorf("disable now is not implemented for OpenRC! PRs to fix are welcomed")
 }
 
+// Mask does nothing
+func (s *OpenRC) Mask(svc string) error {
+	return nil
+}
+
 // Enable does nothing
 func (s *OpenRC) Enable(svc string) error {
 	return nil
@@ -132,6 +137,11 @@ func (s *OpenRC) EnableNow(svc string) error {
 	return fmt.Errorf("enable now is not implemented for OpenRC! PRs to fix are welcomed")
 }
 
+// Unmask does nothing
+func (s *OpenRC) Unmask(svc string) error {
+	return nil
+}
+
 // Restart restarts a service
 func (s *OpenRC) Restart(svc string) error {
 	rr, err := s.r.RunCmd(exec.Command("sudo", "service", svc, "restart"))

@@ -44,12 +44,18 @@ type Manager interface {
 	// Disable disables a service and stops it right after.
 	DisableNow(string) error
 
+	// Mask prevents a service from being started
+	Mask(string) error
+
 	// Enable enables a service
 	Enable(string) error
 
 	// EnableNow enables a service and starts it right after.
 	EnableNow(string) error
 
+	// Unmask allows a service to be started
+	Unmask(string) error
+
 	// Start starts a service idempotently
 	Start(string) error
 

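Note: widening the Manager interface forces every init-system implementation to provide Mask/Unmask — Systemd gets real commands below, OpenRC gets no-ops above. A conventional guard for this, if not already present in the package, is a compile-time assertion (illustrative snippet for the sysinit package, not necessarily part of this change):

```go
// Compile-time checks that both init-system implementations satisfy the
// widened Manager interface; a missing Mask/Unmask fails the build here
// instead of at a call site.
var (
	_ Manager = (*Systemd)(nil)
	_ Manager = (*OpenRC)(nil)
)
```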
@@ -58,6 +58,12 @@ func (s *Systemd) DisableNow(svc string) error {
 	return err
 }
 
+// Mask prevents a service from being started
+func (s *Systemd) Mask(svc string) error {
+	_, err := s.r.RunCmd(exec.Command("sudo", "systemctl", "mask", svc))
+	return err
+}
+
 // Enable enables a service
 func (s *Systemd) Enable(svc string) error {
 	if svc == "kubelet" {
@@ -76,6 +82,12 @@ func (s *Systemd) EnableNow(svc string) error {
 	return err
 }
 
+// Unmask allows a service to be started
+func (s *Systemd) Unmask(svc string) error {
+	_, err := s.r.RunCmd(exec.Command("sudo", "systemctl", "unmask", svc))
+	return err
+}
+
 // Start starts a service
 func (s *Systemd) Start(svc string) error {
 	if err := s.daemonReload(); err != nil {

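Note: `systemctl mask` symlinks a unit to /dev/null, so neither manual nor dependency-triggered starts can activate it until it is unmasked — which is the whole point of pairing Mask in Disable with Unmask in Enable. A hedged usage sketch against the Manager interface (the toggle helper is hypothetical):

```go
// toggleDockerService shows the intended pairing: mask on disable so the
// service cannot be re-activated, unmask before any start.
func toggleDockerService(m Manager, enable bool) error {
	if enable {
		if err := m.Unmask("docker.service"); err != nil {
			return err
		}
		return m.Start("docker.service")
	}
	return m.Mask("docker.service")
}
```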