Docker driver: force docker to use systemd as cgroup manager
Since minikube is running systemd, kubeadm expects systemd to be the cgroup manager. If docker is using a different cgroup manager like cgroupfs, this can cause unstable resource allocation. We were seeing this in Cloud Shell, and forcing docker to use systemd resolved the issue.
pull/7815/head
parent
692763206e
commit
a2d180ec07
|
@ -429,3 +429,8 @@ func addRepoTagToImageName(imgName string) string {
|
|||
} // else it already has repo name dont add anything
|
||||
return imgName
|
||||
}
|
||||
|
||||
// TODO: Enable for containerd
|
||||
func (r *Containerd) ForceSystemdCgroupManager() error {
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -229,3 +229,8 @@ func (r *CRIO) Preload(cfg config.KubernetesConfig) error {
|
|||
}
|
||||
return fmt.Errorf("not yet implemented for %s", r.Name())
|
||||
}
|
||||
|
||||
// ForceSystemdCgroupManager does nothing since CRIO already uses systemd
|
||||
func (r *CRIO) ForceSystemdCgroupManager() error {
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -104,6 +104,8 @@ type Manager interface {
|
|||
SystemLogCmd(int) string
|
||||
// Preload preloads the container runtime with k8s images
|
||||
Preload(config.KubernetesConfig) error
|
||||
// ForceSystemdCgroupManager forces the runtime to use systemd as cgroup manager
|
||||
ForceSystemdCgroupManager() error
|
||||
}
|
||||
|
||||
// Config is runtime configuration
|
||||
|
|
|
@ -365,6 +365,25 @@ func dockerImagesPreloaded(runner command.Runner, images []string) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// ForceSystemdCgroupManager forces docker to use systemd as cgroup manager, as recommended in k8s docs:
|
||||
// https://kubernetes.io/docs/setup/production-environment/container-runtimes/#docker
|
||||
func (r *Docker) ForceSystemdCgroupManager() error {
|
||||
daemonConfig := `{
|
||||
"exec-opts": ["native.cgroupdriver=systemd"],
|
||||
"log-driver": "json-file",
|
||||
"log-opts": {
|
||||
"max-size": "100m"
|
||||
},
|
||||
"storage-driver": "overlay2"
|
||||
}
|
||||
`
|
||||
ma := assets.NewMemoryAsset([]byte(daemonConfig), "/etc/docker", "daemon.json", "0644")
|
||||
if err := r.Runner.Copy(ma); err != nil {
|
||||
return errors.Wrap(err, "copying daemon config")
|
||||
}
|
||||
return r.Restart()
|
||||
}
|
||||
|
||||
func dockerBoundToContainerd(runner command.Runner) bool {
|
||||
// NOTE: assumes systemd
|
||||
rr, err := runner.RunCmd(exec.Command("sudo", "systemctl", "cat", "docker.service"))
|
||||
|
|
|
@ -99,6 +99,7 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
|
|||
|
||||
// setup kubeadm (must come after setupKubeconfig)
|
||||
bs = setupKubeAdm(starter.MachineAPI, *starter.Cfg, *starter.Node, starter.Runner)
|
||||
|
||||
err = bs.StartCluster(*starter.Cfg)
|
||||
|
||||
if err != nil {
|
||||
|
@ -261,6 +262,12 @@ func configureRuntimes(runner cruntime.CommandRunner, cc config.ClusterConfig, k
|
|||
exit.WithError("Failed to enable container runtime", err)
|
||||
}
|
||||
|
||||
if driver.IsKIC(cc.Driver) {
|
||||
if err := cr.ForceSystemdCgroupManager(); err != nil {
|
||||
glog.Warningf("Failed to force %s to use systemd as cgroup manager (this might be ok): %v", cr.Name(), err)
|
||||
}
|
||||
}
|
||||
|
||||
return cr
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue