Merge pull request #15463 from prezha/fix-TestNetworkPlugins-Linux_Docker

improve how CRs and k8s work with CNI plugins and cgroup drivers
pull/15670/head
Steven Powell 2023-01-19 13:04:52 -08:00 committed by GitHub
commit 0e7aefca1b
129 changed files with 2325 additions and 1427 deletions

View File

@ -199,7 +199,7 @@ func mustRestartDockerd(name string, runner command.Runner) {
if err := sysinit.New(runner).Reload("docker"); err != nil {
klog.Warningf("will try to restart dockerd because reload failed: %v", err)
if err := sysinit.New(runner).Restart("docker"); err != nil {
klog.Warningf("Couldn't restart docker inside minikbue within '%v' because: %v", name, err)
klog.Warningf("Couldn't restart docker inside minikube within '%v' because: %v", name, err)
return
}
// if we get to the point that we have to restart docker (instead of reload)
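For context, reload is preferred because it keeps running containers alive, while a full restart does not. A minimal sketch of the same fallback pattern in plain systemctl terms (hypothetical helper, not minikube's sysinit package; only the systemctl verbs are real):

```go
package main

import (
	"fmt"
	"os/exec"
)

// reloadOrRestart mirrors the fallback above: try a cheap "reload" first
// and only do a full "restart" if the reload fails.
func reloadOrRestart(unit string) error {
	if out, err := exec.Command("sudo", "systemctl", "reload", unit).CombinedOutput(); err != nil {
		fmt.Printf("reload of %s failed (%v): %s; falling back to restart\n", unit, err, out)
		if out, err := exec.Command("sudo", "systemctl", "restart", unit).CombinedOutput(); err != nil {
			return fmt.Errorf("restart of %s failed: %v: %s", unit, err, out)
		}
	}
	return nil
}

func main() {
	if err := reloadOrRestart("docker"); err != nil {
		fmt.Println(err)
	}
}
```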

View File

@ -40,8 +40,7 @@ oom_score = 0
[plugins."io.containerd.grpc.v1.cri".containerd]
discard_unpacked_layers = true
snapshotter = "overlayfs"
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
runtime_type = "io.containerd.runc.v2"
default_runtime_name = "runc"
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
runtime_type = ""
runtime_engine = ""
@ -54,7 +53,7 @@ oom_score = 0
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.mk"
conf_dir = "/etc/cni/net.d"
conf_template = ""
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"

View File

@ -1,131 +0,0 @@
version = 2
root = "/var/lib/containerd"
state = "/run/containerd"
plugin_dir = ""
disabled_plugins = []
required_plugins = []
oom_score = 0
[grpc]
address = "/run/containerd/containerd.sock"
tcp_address = ""
tcp_tls_cert = ""
tcp_tls_key = ""
uid = 0
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
[ttrpc]
address = ""
uid = 0
gid = 0
[debug]
address = ""
uid = 0
gid = 0
level = ""
[metrics]
address = ""
grpc_histogram = false
[cgroup]
path = ""
[timeouts]
"io.containerd.timeout.shim.cleanup" = "5s"
"io.containerd.timeout.shim.load" = "5s"
"io.containerd.timeout.shim.shutdown" = "3s"
"io.containerd.timeout.task.state" = "2s"
[plugins]
[plugins."io.containerd.gc.v1.scheduler"]
pause_threshold = 0.02
deletion_threshold = 0
mutation_threshold = 100
schedule_delay = "0s"
startup_delay = "100ms"
[plugins."io.containerd.grpc.v1.cri"]
disable_tcp_service = true
stream_server_address = "127.0.0.1"
stream_server_port = "0"
stream_idle_timeout = "4h0m0s"
enable_selinux = false
selinux_category_range = 1024
sandbox_image = "k8s.gcr.io/pause:3.2"
stats_collect_period = 10
systemd_cgroup = false
enable_tls_streaming = false
max_container_log_line_size = 16384
disable_cgroup = false
disable_apparmor = false
restrict_oom_score_adj = false
max_concurrent_downloads = 3
disable_proc_mount = false
unset_seccomp_profile = ""
tolerate_missing_hugetlb_controller = true
disable_hugetlb_controller = true
ignore_image_defined_volumes = false
[plugins."io.containerd.grpc.v1.cri".containerd]
snapshotter = "overlayfs"
default_runtime_name = "runc"
no_pivot = false
disable_snapshot_annotations = true
discard_unpacked_layers = false
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
runtime_type = ""
runtime_engine = ""
runtime_root = ""
privileged_without_host_devices = false
base_runtime_spec = ""
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
runtime_type = ""
runtime_engine = ""
runtime_root = ""
privileged_without_host_devices = false
base_runtime_spec = ""
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
runtime_engine = ""
runtime_root = ""
privileged_without_host_devices = false
base_runtime_spec = ""
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
max_conf_num = 1
conf_template = ""
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
[plugins."io.containerd.grpc.v1.cri".image_decryption]
key_model = ""
[plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
tls_cert_file = ""
tls_key_file = ""
[plugins."io.containerd.internal.v1.opt"]
path = "/opt/containerd"
[plugins."io.containerd.internal.v1.restart"]
interval = "10s"
[plugins."io.containerd.metadata.v1.bolt"]
content_sharing_policy = "shared"
[plugins."io.containerd.monitor.v1.cgroups"]
no_prometheus = false
[plugins."io.containerd.runtime.v1.linux"]
shim = "containerd-shim"
runtime = "runc"
runtime_root = ""
no_shim = false
shim_debug = false
[plugins."io.containerd.runtime.v2.task"]
platforms = ["linux/amd64"]
[plugins."io.containerd.service.v1.diff-service"]
default = ["walking"]
[plugins."io.containerd.snapshotter.v1.devmapper"]
root_path = ""
pool_name = ""
base_image_size = ""
async_remove = false

View File

@ -15,6 +15,6 @@ sha256 85a531725f15e2d136131119d42af4507a5389e0947015152075c4c93816fb5c v1.4.12.
sha256 7507913ba169c103ab67bc51bec31cd977d4348d7bc842da32b7eab5f930a14b v1.5.10.tar.gz
sha256 02b79d5e2b07b5e64cd28f1fe84395ee11eef95fc49fd923a9ab93022b148be6 v1.5.11.tar.gz
sha256 f422e21e35705d1e741c1f3280813e43f811eaff4dcc5cdafac8b8952b15f468 v1.6.4.tar.gz
sha265 27afb673c20d53aa5c31aec07b38eb7e4dc911e7e1f0c76fac9513bbf070bd24 v1.6.6.tar.gz
sha256 27afb673c20d53aa5c31aec07b38eb7e4dc911e7e1f0c76fac9513bbf070bd24 v1.6.6.tar.gz
sha256 f5f938513c28377f64f85e84f2750d39f26b01262f3a062b7e8ce35b560ca407 v1.6.8.tar.gz
sha256 a034b2273533207d5d96bef8bd3fce1efff85139815efb756d90c705ae1a05ce v1.6.9.tar.gz

View File

@ -14,6 +14,7 @@ CONTAINERD_BIN_AARCH64_ENV = \
CGO_ENABLED=1 \
GO111MODULE=off \
GOPATH="$(CONTAINERD_BIN_AARCH64_GOPATH)" \
GOBIN="$(CONTAINERD_BIN_AARCH64_GOPATH)/bin" \
PATH=$(CONTAINERD_BIN_AARCH64_GOPATH)/bin:$(BR_PATH) \
GOARCH=arm64

View File

@ -15,8 +15,9 @@ ExecStart=/usr/bin/containerd \
$CONTAINERD_MINIKUBE_OPTIONS \
--root ${PERSISTENT_DIR}/var/lib/containerd
TasksMax=8192
# ref: https://github.com/containerd/containerd/blob/main/docs/ops.md#systemd
Delegate=yes
KillMode=mixed
KillMode=process
LimitNOFILE=1048576
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
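Per the referenced containerd ops doc, Delegate=yes lets containerd manage its own cgroup subtree, and KillMode=process keeps systemd from killing container shim processes when the service restarts. A small hedged sketch for verifying the resulting unit properties (the systemctl property names are real; the program itself is illustrative):

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// "systemctl show" prints Key=Value pairs; after this change we
	// expect "Delegate=yes" and "KillMode=process" for containerd.
	out, err := exec.Command("systemctl", "show", "containerd",
		"-p", "Delegate", "-p", "KillMode").Output()
	if err != nil {
		fmt.Println("systemctl show failed:", err)
		return
	}
	fmt.Print(string(out))
}
```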

View File

@ -40,8 +40,7 @@ oom_score = 0
[plugins."io.containerd.grpc.v1.cri".containerd]
discard_unpacked_layers = true
snapshotter = "overlayfs"
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
runtime_type = "io.containerd.runc.v2"
default_runtime_name = "runc"
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
runtime_type = ""
runtime_engine = ""
@ -54,11 +53,11 @@ oom_score = 0
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.mk"
conf_dir = "/etc/cni/net.d"
conf_template = ""
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
[plugins."io.containerd.service.v1.diff-service"]
default = ["walking"]
[plugins."io.containerd.gc.v1.scheduler"]

View File

@ -1,131 +0,0 @@
version = 2
root = "/var/lib/containerd"
state = "/run/containerd"
plugin_dir = ""
disabled_plugins = []
required_plugins = []
oom_score = 0
[grpc]
address = "/run/containerd/containerd.sock"
tcp_address = ""
tcp_tls_cert = ""
tcp_tls_key = ""
uid = 0
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
[ttrpc]
address = ""
uid = 0
gid = 0
[debug]
address = ""
uid = 0
gid = 0
level = ""
[metrics]
address = ""
grpc_histogram = false
[cgroup]
path = ""
[timeouts]
"io.containerd.timeout.shim.cleanup" = "5s"
"io.containerd.timeout.shim.load" = "5s"
"io.containerd.timeout.shim.shutdown" = "3s"
"io.containerd.timeout.task.state" = "2s"
[plugins]
[plugins."io.containerd.gc.v1.scheduler"]
pause_threshold = 0.02
deletion_threshold = 0
mutation_threshold = 100
schedule_delay = "0s"
startup_delay = "100ms"
[plugins."io.containerd.grpc.v1.cri"]
disable_tcp_service = true
stream_server_address = "127.0.0.1"
stream_server_port = "0"
stream_idle_timeout = "4h0m0s"
enable_selinux = false
selinux_category_range = 1024
sandbox_image = "k8s.gcr.io/pause:3.2"
stats_collect_period = 10
systemd_cgroup = false
enable_tls_streaming = false
max_container_log_line_size = 16384
disable_cgroup = false
disable_apparmor = false
restrict_oom_score_adj = false
max_concurrent_downloads = 3
disable_proc_mount = false
unset_seccomp_profile = ""
tolerate_missing_hugetlb_controller = true
disable_hugetlb_controller = true
ignore_image_defined_volumes = false
[plugins."io.containerd.grpc.v1.cri".containerd]
snapshotter = "overlayfs"
default_runtime_name = "runc"
no_pivot = false
disable_snapshot_annotations = true
discard_unpacked_layers = false
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
runtime_type = ""
runtime_engine = ""
runtime_root = ""
privileged_without_host_devices = false
base_runtime_spec = ""
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
runtime_type = ""
runtime_engine = ""
runtime_root = ""
privileged_without_host_devices = false
base_runtime_spec = ""
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
runtime_engine = ""
runtime_root = ""
privileged_without_host_devices = false
base_runtime_spec = ""
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
max_conf_num = 1
conf_template = ""
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
[plugins."io.containerd.grpc.v1.cri".image_decryption]
key_model = ""
[plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
tls_cert_file = ""
tls_key_file = ""
[plugins."io.containerd.internal.v1.opt"]
path = "/opt/containerd"
[plugins."io.containerd.internal.v1.restart"]
interval = "10s"
[plugins."io.containerd.metadata.v1.bolt"]
content_sharing_policy = "shared"
[plugins."io.containerd.monitor.v1.cgroups"]
no_prometheus = false
[plugins."io.containerd.runtime.v1.linux"]
shim = "containerd-shim"
runtime = "runc"
runtime_root = ""
no_shim = false
shim_debug = false
[plugins."io.containerd.runtime.v2.task"]
platforms = ["linux/amd64"]
[plugins."io.containerd.service.v1.diff-service"]
default = ["walking"]
[plugins."io.containerd.snapshotter.v1.devmapper"]
root_path = ""
pool_name = ""
base_image_size = ""
async_remove = false

View File

@ -15,8 +15,9 @@ ExecStart=/usr/bin/containerd \
$CONTAINERD_MINIKUBE_OPTIONS \
--root ${PERSISTENT_DIR}/var/lib/containerd
TasksMax=8192
# ref: https://github.com/containerd/containerd/blob/main/docs/ops.md#systemd
Delegate=yes
KillMode=mixed
KillMode=process
LimitNOFILE=1048576
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.

View File

@ -1,7 +1,7 @@
version = 2
root = "/var/lib/containerd"
root = "/var/lib/containerd"
state = "/run/containerd"
oom_score = 0
oom_score = 0
# imports
[grpc]
@ -40,8 +40,7 @@ oom_score = 0
[plugins."io.containerd.grpc.v1.cri".containerd]
discard_unpacked_layers = true
snapshotter = "overlayfs"
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
runtime_type = "io.containerd.runc.v2"
default_runtime_name = "runc"
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
runtime_type = ""
runtime_engine = ""
@ -54,7 +53,7 @@ oom_score = 0
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.mk"
conf_dir = "/etc/cni/net.d"
conf_template = ""
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"

go.mod
View File

@ -32,15 +32,6 @@ require (
github.com/hooklift/iso9660 v0.0.0-20170318115843-1cf07e5970d8
github.com/intel-go/cpuid v0.0.0-20181003105527-1a4a6f06a1c6 // indirect
github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f
github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c
github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d // indirect
github.com/juju/fslock v0.0.0-20160525022230-4d5c94c67b4b
github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 // indirect
github.com/juju/mutex v0.0.0-20180619145857-d21b13acf4bf
github.com/juju/retry v0.0.0-20180821225755-9058e192b216 // indirect
github.com/juju/testing v0.0.0-20190723135506-ce30eb24acd2 // indirect
github.com/juju/utils v0.0.0-20180820210520-bf9cc5bdd62d // indirect
github.com/juju/version v0.0.0-20180108022336-b64dbd566305 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
github.com/klauspost/cpuid v1.2.0
github.com/machine-drivers/docker-machine-driver-vmware v0.1.5
@ -99,6 +90,9 @@ require (
github.com/docker/cli v20.10.22+incompatible
github.com/docker/go-connections v0.4.0
github.com/google/go-github/v43 v43.0.0
github.com/juju/clock v1.0.2
github.com/juju/fslock v0.0.0-20160525022230-4d5c94c67b4b
github.com/juju/mutex/v2 v2.0.0
github.com/opencontainers/runc v1.1.4
github.com/santhosh-tekuri/jsonschema/v5 v5.1.1
)
@ -166,6 +160,7 @@ require (
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/juju/errors v0.0.0-20220203013757-bd733f3c86b9 // indirect
github.com/klauspost/compress v1.15.11 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/magiconair/properties v1.8.6 // indirect

go.sum
View File

@ -683,24 +683,21 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c h1:3UvYABOQRhJAApj9MdCN+Ydv841ETSoy6xLzdmmr/9A=
github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c/go.mod h1:nD0vlnrUjcjJhqN5WuCWZyzfd5AHZAC9/ajvbSx69xA=
github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d h1:hJXjZMxj0SWlMoQkzeZDLi2cmeiWKa7y1B8Rg+qaoEc=
github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
github.com/juju/clock v1.0.2 h1:dJFdUGjtR/76l6U5WLVVI/B3i6+u3Nb9F9s1m+xxrxo=
github.com/juju/clock v1.0.2/go.mod h1:HIBvJ8kiV/n7UHwKuCkdYL4l/MDECztHR2sAvWDxxf0=
github.com/juju/collections v0.0.0-20200605021417-0d0ec82b7271 h1:4R626WTwa7pRYQFiIRLVPepMhm05eZMEx+wIurRnMLc=
github.com/juju/errors v0.0.0-20220203013757-bd733f3c86b9 h1:EJHbsNpQyupmMeWTq7inn+5L/WZ7JfzCVPJ+DP9McCQ=
github.com/juju/errors v0.0.0-20220203013757-bd733f3c86b9/go.mod h1:TRm7EVGA3mQOqSVcBySRY7a9Y1/gyVhh/WTCnc5sD4U=
github.com/juju/fslock v0.0.0-20160525022230-4d5c94c67b4b h1:FQ7+9fxhyp82ks9vAuyPzG0/vVbWwMwLJ+P6yJI5FN8=
github.com/juju/fslock v0.0.0-20160525022230-4d5c94c67b4b/go.mod h1:HMcgvsgd0Fjj4XXDkbjdmlbI505rUPBs6WBMYg2pXks=
github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 h1:UUHMLvzt/31azWTN/ifGWef4WUqvXk0iRqdhdy/2uzI=
github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
github.com/juju/mutex v0.0.0-20180619145857-d21b13acf4bf h1:2d3cilQly1OpAfZcn4QRuwDOdVoHsM4cDTkcKbmO760=
github.com/juju/mutex v0.0.0-20180619145857-d21b13acf4bf/go.mod h1:Y3oOzHH8CQ0Ppt0oCKJ2JFO81/EsWenH5AEqigLH+yY=
github.com/juju/loggo v0.0.0-20210728185423-eebad3a902c4 h1:NO5tuyw++EGLnz56Q8KMyDZRwJwWO8jQnj285J3FOmY=
github.com/juju/mgo/v2 v2.0.0-20210302023703-70d5d206e208 h1:/WiCm+Vpj87e4QWuWwPD/bNE9kDrWCLvPBHOQNcG2+A=
github.com/juju/mutex/v2 v2.0.0 h1:rVmJdOaXGWF8rjcFHBNd4x57/1tks5CgXHx55O55SB0=
github.com/juju/mutex/v2 v2.0.0/go.mod h1:jwCfBs/smYDaeZLqeaCi8CB8M+tOes4yf827HoOEoqk=
github.com/juju/retry v0.0.0-20180821225755-9058e192b216 h1:/eQL7EJQKFHByJe3DeE8Z36yqManj9UY5zppDoQi4FU=
github.com/juju/retry v0.0.0-20180821225755-9058e192b216/go.mod h1:OohPQGsr4pnxwD5YljhQ+TZnuVRYpa5irjugL1Yuif4=
github.com/juju/testing v0.0.0-20190723135506-ce30eb24acd2 h1:Pp8RxiF4rSoXP9SED26WCfNB28/dwTDpPXS8XMJR8rc=
github.com/juju/testing v0.0.0-20190723135506-ce30eb24acd2/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
github.com/juju/utils v0.0.0-20180820210520-bf9cc5bdd62d h1:irPlN9z5VCe6BTsqVsxheCZH99OFSmqSVyTigW4mEoY=
github.com/juju/utils v0.0.0-20180820210520-bf9cc5bdd62d/go.mod h1:6/KLg8Wz/y2KVGWEpkK9vMNGkOnu4k/cqs8Z1fKjTOk=
github.com/juju/version v0.0.0-20180108022336-b64dbd566305 h1:lQxPJ1URr2fjsKnJRt/BxiIxjLt9IKGvS+0injMHbag=
github.com/juju/version v0.0.0-20180108022336-b64dbd566305/go.mod h1:kE8gK5X0CImdr7qpSKl3xB2PmpySSmfj7zVbkZFs81U=
github.com/juju/testing v0.0.0-20220203020004-a0ff61f03494 h1:XEDzpuZb8Ma7vLja3+5hzUqVTvAqm5Y+ygvnDs5iTMM=
github.com/juju/utils/v3 v3.0.0-20220130232349-cd7ecef0e94a h1:5ZWDCeCF0RaITrZGemzmDFIhjR/MVSvBUqgSyaeTMbE=
github.com/juju/version/v2 v2.0.0-20211007103408-2e8da085dc23 h1:wtEPbidt1VyHlb8RSztU6ySQj29FLsOQiI9XiJhXDM4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
@ -1688,8 +1685,6 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw=
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=

View File

@ -31,6 +31,7 @@ import (
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/detect"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/sysinit"
"k8s.io/minikube/pkg/util"
@ -93,7 +94,8 @@ func generateTarball(kubernetesVersion, containerRuntime, tarballFilename string
if err != nil {
return errors.Wrap(err, "failed create new runtime")
}
if err := cr.Enable(true, false, false); err != nil {
if err := cr.Enable(true, detect.CgroupDriver(), false); err != nil {
return errors.Wrap(err, "enable container runtime")
}
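The preload generator now passes the host's detected cgroup driver instead of a hard-coded value, so the generated tarball matches the machine it was built on. As a hedged sketch of the usual detection heuristic (the real detect package may do more), cgroup v2 hosts are treated as systemd-managed:

```go
package main

import (
	"fmt"
	"os"
)

// cgroupDriver is an illustrative stand-in for detect.CgroupDriver():
// on cgroup v2 hosts /sys/fs/cgroup/cgroup.controllers exists and the
// systemd driver is the conventional choice; otherwise fall back to cgroupfs.
func cgroupDriver() string {
	if _, err := os.Stat("/sys/fs/cgroup/cgroup.controllers"); err == nil {
		return "systemd"
	}
	return "cgroupfs"
}

func main() {
	fmt.Println("detected cgroup driver:", cgroupDriver())
}
```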

View File

@ -145,6 +145,7 @@ func tryCreateDockerNetwork(ociBin string, subnet *network.Parameters, mtu int,
rr, err := runCmd(exec.Command(ociBin, args...))
if err != nil {
klog.Errorf("failed to create %s network %s %s with gateway %s and mtu of %d: %v", ociBin, name, subnet.CIDR, subnet.Gateway, mtu, err)
// Pool overlaps with other one on this address space
if strings.Contains(rr.Output(), "Pool overlaps") {
return nil, ErrNetworkSubnetTaken
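The new comment pins down the dockerd error message that signals a subnet collision. Callers typically react to ErrNetworkSubnetTaken by moving on to the next candidate subnet; a hedged standalone illustration (subnet candidates and network name are made up):

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Try candidate subnets until dockerd accepts one; "Pool overlaps"
	// is the substring matched in the hunk above.
	for _, subnet := range []string{"192.168.49.0/24", "192.168.58.0/24", "192.168.67.0/24"} {
		out, err := exec.Command("docker", "network", "create",
			"--subnet="+subnet, "demo-net").CombinedOutput()
		if err != nil {
			if strings.Contains(string(out), "Pool overlaps") {
				fmt.Printf("subnet %s taken, trying next candidate\n", subnet)
				continue
			}
			fmt.Printf("network create failed: %v: %s\n", err, out)
			return
		}
		fmt.Println("created demo-net on", subnet)
		return
	}
}
```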

View File

@ -97,7 +97,6 @@ func WaitForPods(c kubernetes.Interface, ns string, selector string, timeOut ...
return false, nil
}
}
return true, nil
}
t := ReasonableStartTime
@ -223,14 +222,19 @@ func ScaleDeployment(kcontext, namespace, deploymentName string, replicas int) e
err = wait.PollImmediate(kconst.APICallRetryInterval, ReasonableMutateTime, func() (bool, error) {
scale, err := client.AppsV1().Deployments(namespace).GetScale(context.Background(), deploymentName, meta.GetOptions{})
if err != nil {
klog.Warningf("failed getting deployment scale, will retry: %v", err)
if !IsRetryableAPIError(err) {
return false, fmt.Errorf("non-retryable failure while getting %q deployment scale: %v", deploymentName, err)
}
klog.Warningf("failed getting %q deployment scale, will retry: %v", deploymentName, err)
return false, nil
}
if scale.Spec.Replicas != int32(replicas) {
scale.Spec.Replicas = int32(replicas)
_, err = client.AppsV1().Deployments(namespace).UpdateScale(context.Background(), deploymentName, scale, meta.UpdateOptions{})
if err != nil {
klog.Warningf("failed rescaling deployment, will retry: %v", err)
if _, err = client.AppsV1().Deployments(namespace).UpdateScale(context.Background(), deploymentName, scale, meta.UpdateOptions{}); err != nil {
if !IsRetryableAPIError(err) {
return false, fmt.Errorf("non-retryable failure while rescaling %s deployment: %v", deploymentName, err)
}
klog.Warningf("failed rescaling %s deployment, will retry: %v", deploymentName, err)
}
// repeat (if the change was successful, loop once more to check and confirm the requested scale)
return false, nil
@ -238,10 +242,10 @@ func ScaleDeployment(kcontext, namespace, deploymentName string, replicas int) e
return true, nil
})
if err != nil {
klog.Infof("timed out trying to rescale deployment %q in namespace %q and context %q to %d: %v", deploymentName, namespace, kcontext, replicas, err)
klog.Warningf("failed rescaling %q deployment in %q namespace and %q context to %d replicas: %v", deploymentName, namespace, kcontext, replicas, err)
return err
}
klog.Infof("deployment %q in namespace %q and context %q rescaled to %d", deploymentName, namespace, kcontext, replicas)
klog.Infof("%q deployment in %q namespace and %q context rescaled to %d replicas", deploymentName, namespace, kcontext, replicas)
return nil
}
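ScaleDeployment now bails out early on non-retryable API errors instead of polling until timeout. The minikube predicate lives elsewhere in these test helpers; a hedged sketch of what such a predicate typically checks, built from the standard apimachinery helpers:

```go
package kapi

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	utilnet "k8s.io/apimachinery/pkg/util/net"
)

// isRetryableAPIError reports whether an apiserver request is worth
// retrying: transient timeouts, throttling, internal errors and dropped
// connections are; errors like NotFound or Forbidden are not.
func isRetryableAPIError(err error) bool {
	return apierrors.IsTimeout(err) ||
		apierrors.IsServerTimeout(err) ||
		apierrors.IsTooManyRequests(err) ||
		apierrors.IsInternalError(err) ||
		utilnet.IsConnectionReset(err) ||
		utilnet.IsConnectionRefused(err)
}
```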

View File

@ -77,6 +77,9 @@ authentication:
x509:
clientCAFile: {{.ClientCAFile}}
cgroupDriver: {{.CgroupDriver}}
{{- range $key, $val := .KubeletConfigOpts}}
{{$key}}: {{$val}}
{{- end}}
clusterDomain: "{{if .DNSDomain}}{{.DNSDomain}}{{else}}cluster.local{{end}}"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
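The new range block lets the bootstrapper splice arbitrary key/value pairs into the rendered KubeletConfiguration. A self-contained illustration of how Go's text/template expands it (the values mirror the defaults set later in this commit; map keys render in sorted order):

```go
package main

import (
	"os"
	"text/template"
)

const kubeletCfg = `cgroupDriver: {{.CgroupDriver}}
{{- range $key, $val := .KubeletConfigOpts}}
{{$key}}: {{$val}}
{{- end}}
clusterDomain: "cluster.local"
`

func main() {
	t := template.Must(template.New("kubelet").Parse(kubeletCfg))
	_ = t.Execute(os.Stdout, struct {
		CgroupDriver      string
		KubeletConfigOpts map[string]string
	}{
		CgroupDriver: "cgroupfs",
		KubeletConfigOpts: map[string]string{
			"hairpinMode":           "hairpin-veth",
			"runtimeRequestTimeout": "15m",
		},
	})
	// Output:
	// cgroupDriver: cgroupfs
	// hairpinMode: hairpin-veth
	// runtimeRequestTimeout: 15m
	// clusterDomain: "cluster.local"
}
```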

View File

@ -80,6 +80,9 @@ authentication:
x509:
clientCAFile: {{.ClientCAFile}}
cgroupDriver: {{.CgroupDriver}}
{{- range $key, $val := .KubeletConfigOpts}}
{{$key}}: {{$val}}
{{- end}}
clusterDomain: "{{if .DNSDomain}}{{.DNSDomain}}{{else}}cluster.local{{end}}"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -68,6 +68,13 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana
}
return nil, errors.Wrap(err, "getting cgroup driver")
}
// TODO: investigate why containerd (v1.6.15) does not work with k8s (v1.25.3) when both are set to use systemd cgroup driver
// issue: https://github.com/kubernetes/minikube/issues/15633
// until this is fixed, the workaround is to configure kubelet to use cgroupfs when containerd is using systemd
// note: pkg/minikube/bootstrapper/bsutil/kubeadm_test.go::TestGenerateKubeadmYAML also expects this override (for now)
if cc.KubernetesConfig.ContainerRuntime == constants.Containerd && cgroupDriver == constants.SystemdCgroupDriver {
cgroupDriver = constants.CgroupfsCgroupDriver
}
componentOpts, err := createExtraComponentConfig(k8s.ExtraOptions, version, componentFeatureArgs, cp)
if err != nil {
@ -86,6 +93,20 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana
}
klog.Infof("Using pod CIDR: %s", podCIDR)
// ref: https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration
kubeletConfigOpts := kubeletConfigOpts(k8s.ExtraOptions)
// set hairpin mode to hairpin-veth to achieve hairpin NAT, because promiscuous-bridge assumes the existence of a container bridge named cbr0
// ref: https://kubernetes.io/docs/tasks/debug/debug-application/debug-service/#a-pod-fails-to-reach-itself-via-the-service-ip
kubeletConfigOpts["hairpinMode"] = k8s.ExtraOptions.Get("hairpin-mode", Kubelet)
if kubeletConfigOpts["hairpinMode"] == "" {
kubeletConfigOpts["hairpinMode"] = "hairpin-veth"
}
// set timeout for all runtime requests except long running requests - pull, logs, exec and attach
kubeletConfigOpts["runtimeRequestTimeout"] = k8s.ExtraOptions.Get("runtime-request-timeout", Kubelet)
if kubeletConfigOpts["runtimeRequestTimeout"] == "" {
kubeletConfigOpts["runtimeRequestTimeout"] = "15m"
}
opts := struct {
CertDir string
ServiceCIDR string
@ -134,7 +155,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana
ControlPlaneAddress: constants.ControlPlaneAlias,
KubeProxyOptions: createKubeProxyOptions(k8s.ExtraOptions),
ResolvConfSearchRegression: HasResolvConfSearchRegression(k8s.KubernetesVersion),
KubeletConfigOpts: kubeletConfigOpts(k8s.ExtraOptions),
KubeletConfigOpts: kubeletConfigOpts,
}
if k8s.ServiceCIDR != "" {

View File

@ -128,7 +128,7 @@ This test case has only 1 thing to test and that is the
networking/dnsDomain value
*/
func TestGenerateKubeadmYAMLDNS(t *testing.T) {
versions, err := recentReleases(0)
versions, err := recentReleases(6)
if err != nil {
t.Errorf("versions: %v", err)
}
@ -212,7 +212,7 @@ func TestGenerateKubeadmYAML(t *testing.T) {
fcr.SetCommandToOutput(map[string]string{
"docker info --format {{.CgroupDriver}}": "systemd\n",
"crio config": "cgroup_manager = \"systemd\"\n",
"sudo crictl info": "{\"config\": {\"systemdCgroup\": true}}",
"sudo crictl info": "{\"config\": {\"containerd\": {\"runtimes\": {\"runc\": {\"options\": {\"SystemdCgroup\": true}}}}}}",
})
tests := []struct {
name string
@ -221,13 +221,13 @@ func TestGenerateKubeadmYAML(t *testing.T) {
cfg config.ClusterConfig
}{
{"default", "docker", false, config.ClusterConfig{Name: "mk"}},
{"containerd", "containerd", false, config.ClusterConfig{Name: "mk"}},
{"containerd", "containerd", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ContainerRuntime: constants.Containerd}}},
{"crio", "crio", false, config.ClusterConfig{Name: "mk"}},
{"options", "docker", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts}}},
{"crio-options-gates", "crio", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts, FeatureGates: "a=b"}}},
{"unknown-component", "docker", true, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: config.ExtraOptionSlice{config.ExtraOption{Component: "not-a-real-component", Key: "killswitch", Value: "true"}}}}},
{"containerd-api-port", "containerd", false, config.ClusterConfig{Name: "mk", Nodes: []config.Node{{Port: 12345}}}},
{"containerd-pod-network-cidr", "containerd", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOptsPodCidr}}},
{"containerd-api-port", "containerd", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ContainerRuntime: constants.Containerd}, Nodes: []config.Node{{Port: 12345}}}},
{"containerd-pod-network-cidr", "containerd", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ContainerRuntime: constants.Containerd, ExtraOptions: extraOptsPodCidr}}},
{"image-repository", "docker", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ImageRepository: "test/repo"}}},
}
for _, version := range versions {
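The updated fake `sudo crictl info` output reflects where modern containerd reports its cgroup driver: nested under the runc runtime options rather than a top-level systemdCgroup field. A small sketch decoding that shape (the fixture is copied from the test above):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Same shape as the test fixture above.
	raw := `{"config": {"containerd": {"runtimes": {"runc": {"options": {"SystemdCgroup": true}}}}}}`

	var info struct {
		Config struct {
			Containerd struct {
				Runtimes map[string]struct {
					Options struct {
						SystemdCgroup bool `json:"SystemdCgroup"`
					} `json:"options"`
				} `json:"runtimes"`
			} `json:"containerd"`
		} `json:"config"`
	}
	if err := json.Unmarshal([]byte(raw), &info); err != nil {
		panic(err)
	}
	fmt.Println("runc uses systemd cgroup driver:",
		info.Config.Containerd.Runtimes["runc"].Options.SystemdCgroup) // true
}
```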

View File

@ -37,8 +37,13 @@ import (
)
// kubeletConfigParams are the only kubelet parameters allowed in the kubeadm config file and not to be used as kubelet flags
// ref: https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/ - look for "DEPRECATED" flags
// ref: https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/
// ref: https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration
var kubeletConfigParams = []string{
"localStorageCapacityIsolation",
"runtime-request-timeout",
"hairpin-mode",
}
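These entries use the kubelet flag spellings; when moved into the config file they appear in camelCase (hairpinMode, runtimeRequestTimeout), as seen in the kubeadm templates above. A hypothetical converter illustrating that mapping (not minikube's actual implementation):

```go
package main

import (
	"fmt"
	"strings"
)

// flagToConfigKey converts a kebab-case kubelet flag name into the
// camelCase key used by the KubeletConfiguration file.
func flagToConfigKey(flag string) string {
	parts := strings.Split(flag, "-")
	for i := 1; i < len(parts); i++ {
		if parts[i] != "" {
			parts[i] = strings.ToUpper(parts[i][:1]) + parts[i][1:]
		}
	}
	return strings.Join(parts, "")
}

func main() {
	fmt.Println(flagToConfigKey("hairpin-mode"))            // hairpinMode
	fmt.Println(flagToConfigKey("runtime-request-timeout")) // runtimeRequestTimeout
}
```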
func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manager) (map[string]string, error) {

View File

@ -80,7 +80,7 @@ Wants=crio.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.18.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
ExecStart=/var/lib/minikube/binaries/v1.18.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests
[Install]
`,
@ -106,7 +106,7 @@ Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.18.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
ExecStart=/var/lib/minikube/binaries/v1.18.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests
[Install]
`,
@ -139,7 +139,7 @@ Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.18.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
ExecStart=/var/lib/minikube/binaries/v1.18.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests
[Install]
`,

View File

@ -149,7 +149,7 @@ func APIServerVersionMatch(client *kubernetes.Clientset, expected string) error
// by container runtime restart for example and there is a gap before it comes back
func WaitForAPIServerStatus(cr command.Runner, to time.Duration, hostname string, port int) (state.State, error) {
var st state.State
err := wait.PollImmediate(200*time.Millisecond, to, func() (bool, error) {
err := wait.PollImmediate(500*time.Millisecond, to, func() (bool, error) {
var err error
st, err = APIServerStatus(cr, hostname, port)
if st == state.Stopped {

View File

@ -51,7 +51,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -51,7 +51,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -51,7 +51,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -58,6 +58,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -52,6 +52,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -52,6 +52,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -52,6 +52,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "minikube.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -53,6 +53,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -55,6 +55,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -51,7 +51,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -51,7 +51,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -51,7 +51,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -58,6 +58,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -52,6 +52,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -52,6 +52,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -52,6 +52,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "minikube.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -53,6 +53,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -55,6 +55,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -51,7 +51,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -51,7 +51,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -51,7 +51,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -58,6 +58,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -52,6 +52,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -52,6 +52,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -52,6 +52,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "minikube.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -53,6 +53,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -55,6 +55,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -51,7 +51,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -51,7 +51,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -51,7 +51,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -58,6 +58,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -52,6 +52,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -52,6 +52,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -52,6 +52,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "minikube.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -53,6 +53,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -55,6 +55,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -49,7 +49,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -49,7 +49,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -49,7 +49,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -56,6 +56,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -50,6 +50,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -50,6 +50,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -50,6 +50,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "minikube.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -51,6 +51,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -53,6 +53,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -49,7 +49,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -49,7 +49,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -49,7 +49,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -56,6 +56,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -50,6 +50,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -50,6 +50,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -50,6 +50,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "minikube.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -51,6 +51,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -53,6 +53,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -49,7 +49,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -49,7 +49,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -49,7 +49,9 @@ kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -56,6 +56,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -50,6 +50,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -50,6 +50,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -50,6 +50,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "minikube.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -51,6 +51,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -53,6 +53,8 @@ authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100

View File

@ -30,6 +30,7 @@ import (
"strings"
"time"
"github.com/juju/mutex/v2"
"github.com/otiai10/copy"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/runtime"
@ -46,6 +47,7 @@ import (
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/vmpath"
"k8s.io/minikube/pkg/util"
"k8s.io/minikube/pkg/util/lock"
)
// SetupCerts gets the generated credentials required to talk to the APIServer.
@ -177,6 +179,17 @@ func generateSharedCACerts() (CACerts, bool, error) {
},
}
// create a lock for "ca-certs" to avoid race condition over multiple minikube instances rewriting shared ca certs
hold := filepath.Join(globalPath, "ca-certs")
spec := lock.PathMutexSpec(hold)
spec.Timeout = 1 * time.Minute
klog.Infof("acquiring lock for shared ca certs: %+v", spec)
releaser, err := mutex.Acquire(spec)
if err != nil {
return cc, false, errors.Wrapf(err, "unable to acquire lock for shared ca certs %+v", spec)
}
defer releaser.Release()
for _, ca := range caCertSpecs {
if isValid(ca.certPath, ca.keyPath) {
klog.Infof("skipping %s CA generation: %s", ca.subject, ca.keyPath)

View File

@ -174,7 +174,7 @@ func KindNet(repo string) string {
}
// all calico images are from https://docs.projectcalico.org/manifests/calico.yaml
const calicoVersion = "v3.20.0"
const calicoVersion = "v3.24.5"
const calicoRepo = "docker.io/calico"
// CalicoDaemonSet returns the image used for calicoDaemonSet
@ -188,11 +188,6 @@ func CalicoDeployment(repo string) string {
return calicoCommon(repo, "kube-controllers")
}
// CalicoFelixDriver returns image used for felix driver
func CalicoFelixDriver(repo string) string {
return calicoCommon(repo, "pod2daemon-flexvol")
}
// CalicoBin returns image used for calico binary image
func CalicoBin(repo string) string {
return calicoCommon(repo, "cni")

View File

@ -601,7 +601,8 @@ func (k *Bootstrapper) needsReconfigure(conf string, hostname string, port int,
}
// cruntime.Enable() may restart kube-apiserver but does not wait for it to return back
apiStatusTimeout := 3000 * time.Millisecond
// this can take around five seconds, so 10 seconds should be sufficient for the apiserver to come back up
apiStatusTimeout := 10 * time.Second
st, err := kverify.WaitForAPIServerStatus(k.c, apiStatusTimeout, hostname, port)
if err != nil {
klog.Infof("needs reconfigure: apiserver error: %v", err)
@ -1117,7 +1118,7 @@ func (k *Bootstrapper) elevateKubeSystemPrivileges(cfg config.ClusterConfig) err
// stopKubeSystem stops all the containers in the kube-system to prevent #8740 when doing hot upgrade
func (k *Bootstrapper) stopKubeSystem(cfg config.ClusterConfig) error {
klog.Info("stopping kube-system containers ...")
cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c})
cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Socket: cfg.KubernetesConfig.CRISocket, Runner: k.c})
if err != nil {
return errors.Wrap(err, "new cruntime")
}

View File

@ -28,8 +28,11 @@ import (
)
// bridge is what minikube defaulted to when `--enable-default-cni=true`
// https://github.com/containernetworking/plugins/blob/master/plugins/main/bridge/README.md
// ref: https://www.cni.dev/plugins/current/main/bridge/
// ref: https://www.cni.dev/plugins/current/meta/portmap/
// note: "cannot set hairpin mode and promiscuous mode at the same time"
// ref: https://github.com/containernetworking/plugins/blob/7e9ada51e751740541969e1ea5a803cbf45adcf3/plugins/main/bridge/bridge.go#L424
var bridgeConf = template.Must(template.New("bridge").Parse(`
{
"cniVersion": "0.3.1",

View File

@ -46,9 +46,9 @@ type Calico struct {
}
type calicoTmplStruct struct {
PodCIDR string
DeploymentImageName string
DaemonSetImageName string
FelixDriverImageName string
BinaryImageName string
LegacyPodDisruptionBudget bool
}
@ -66,9 +66,9 @@ func (c Calico) manifest() (assets.CopyableFile, error) {
}
input := &calicoTmplStruct{
PodCIDR: DefaultPodCIDR,
DeploymentImageName: images.CalicoDeployment(c.cc.KubernetesConfig.ImageRepository),
DaemonSetImageName: images.CalicoDaemonSet(c.cc.KubernetesConfig.ImageRepository),
FelixDriverImageName: images.CalicoFelixDriver(c.cc.KubernetesConfig.ImageRepository),
BinaryImageName: images.CalicoBin(c.cc.KubernetesConfig.ImageRepository),
LegacyPodDisruptionBudget: k8sVersion.LT(semver.Version{Major: 1, Minor: 25}),
}

File diff suppressed because it is too large

View File

@ -20,8 +20,11 @@ package cni
import (
"context"
"fmt"
"net"
"os/exec"
"path"
"path/filepath"
"strings"
"time"
"github.com/blang/semver/v4"
@ -43,20 +46,6 @@ const (
// DefaultConfDir is the default CNI Config Directory path
DefaultConfDir = "/etc/cni/net.d"
// CustomConfDir is the custom CNI Config Directory path used to avoid conflicting CNI configs
// ref: https://github.com/kubernetes/minikube/issues/10984 and https://github.com/kubernetes/minikube/pull/11106
CustomConfDir = "/etc/cni/net.mk"
)
var (
// ConfDir is the CNI Config Directory path that can be customised, defaulting to DefaultConfDir
ConfDir = DefaultConfDir
// Network is the network name that CNI should use (eg, "kindnet").
// Currently, only crio (and podman) can use it, so that setting custom ConfDir is not necessary.
// ref: https://github.com/cri-o/cri-o/issues/2121 (and https://github.com/containers/podman/issues/2370)
// ref: https://github.com/cri-o/cri-o/blob/master/docs/crio.conf.5.md#crionetwork-table
Network = ""
)
// Runner is the subset of command.Runner this package consumes
@ -115,10 +104,6 @@ func New(cc *config.ClusterConfig) (Manager, error) {
cnm, err = NewCustom(*cc, cc.KubernetesConfig.CNI)
}
if err := configureCNI(cc, cnm); err != nil {
klog.Errorf("unable to set CNI Config Directory: %v", err)
}
return cnm, err
}
@ -145,11 +130,6 @@ func chooseDefault(cc config.ClusterConfig) Manager {
return Bridge{}
}
if driver.BareMetal(cc.Driver) {
klog.Infof("Driver %s used, CNI unnecessary in this configuration, recommending no CNI", cc.Driver)
return Disabled{cc: cc}
}
if len(cc.Nodes) > 1 || cc.MultiNodeRequested {
// Enables the KindNet CNI on the control-plane in multi-node clusters. This solves the network problem
// inside pods for multi-node clusters. See https://github.com/kubernetes/minikube/issues/9838.
@ -159,10 +139,22 @@ func chooseDefault(cc config.ClusterConfig) Manager {
if cc.KubernetesConfig.ContainerRuntime != constants.Docker {
if driver.IsKIC(cc.Driver) {
klog.Infof("%q driver + %s runtime found, recommending kindnet", cc.Driver, cc.KubernetesConfig.ContainerRuntime)
klog.Infof("%q driver + %q runtime found, recommending kindnet", cc.Driver, cc.KubernetesConfig.ContainerRuntime)
return KindNet{cc: cc}
}
klog.Infof("%q driver + %s runtime found, recommending bridge", cc.Driver, cc.KubernetesConfig.ContainerRuntime)
klog.Infof("%q driver + %q runtime found, recommending bridge", cc.Driver, cc.KubernetesConfig.ContainerRuntime)
return Bridge{cc: cc}
}
// for the docker container runtime on k8s v1.24+, where dockershim and kubenet were removed, we fall back to the bridge cni for cri-docker(d)
// ref: https://github.com/Mirantis/cri-dockerd#important
// ref: https://github.com/Mirantis/cri-dockerd#to-use-with-kubernetes
// note: currently, the default cni that we "distribute" (in /etc/cni/net.d) is based on the cri-o bridge, and
// because it does not currently use the portmap plugin, we pick "our" bridge instead (the cri-o one will be disabled automatically)
// ref: https://github.com/cri-o/cri-o/blob/f317b267ddef21aee5ffc92d890a77112b006815/contrib/cni/10-crio-bridge.conflist
kv, err := util.ParseKubernetesVersion(cc.KubernetesConfig.KubernetesVersion)
if err == nil && kv.GTE(semver.MustParse("1.24.0-alpha.2")) {
klog.Infof("%q driver + %q container runtime found on kubernetes v1.24+, recommending bridge", cc.Driver, cc.KubernetesConfig.ContainerRuntime)
return Bridge{cc: cc}
}
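The "1.24.0-alpha.2" gate is deliberate: in semver ordering a pre-release sorts below its release, so gating on plain "1.24.0" would wrongly exclude 1.24 pre-releases. A short, runnable illustration with the same blang/semver library:

package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

func main() {
	gate := semver.MustParse("1.24.0-alpha.2")
	for _, v := range []string{"1.23.9", "1.24.0-beta.0", "1.24.0", "1.24.1"} {
		kv := semver.MustParse(v)
		// pre-releases of 1.24.0 still satisfy the gate, while any 1.23.x does not
		fmt.Printf("%-14s GTE %s: %v\n", v, gate, kv.GTE(gate))
	}
}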
@ -200,41 +192,118 @@ func applyManifest(cc config.ClusterConfig, r Runner, f assets.CopyableFile) err
return nil
}
// configureCNI - to avoid conflicting CNI configs, it sets:
// - for crio: 'cni_default_network' config param via cni.Network
// - for containerd and docker: kubelet's '--cni-conf-dir' flag to custom CNI Config Directory path (same used also by CNI Deployment).
// ref: https://github.com/kubernetes/minikube/issues/10984 and https://github.com/kubernetes/minikube/pull/11106
// Note: currently, this change affects only Kindnet CNI (and all multinodes using it), but it can be easily expanded to other/all CNIs if needed.
// Note2: Cilium does not need workaround as they automatically restart pods after CNI is successfully deployed.
func configureCNI(cc *config.ClusterConfig, cnm Manager) error {
if _, kindnet := cnm.(KindNet); kindnet {
// crio only needs CNI network name; hopefully others (containerd, docker and kubeadm/kubelet) will follow eventually
if cc.KubernetesConfig.ContainerRuntime == constants.CRIO {
Network = "kindnet"
return nil
}
version, err := util.ParseKubernetesVersion(cc.KubernetesConfig.KubernetesVersion)
if err != nil {
return err
}
// The CNI configuration is handled by CRI in 1.24+
if version.LT(semver.MustParse("1.24.0-alpha.2")) {
// for containerd and docker: auto-set custom CNI via kubelet's 'cni-conf-dir' param, if not user-specified
eo := fmt.Sprintf("kubelet.cni-conf-dir=%s", CustomConfDir)
if !cc.KubernetesConfig.ExtraOptions.Exists(eo) {
klog.Infof("auto-setting extra-config to %q", eo)
if err := cc.KubernetesConfig.ExtraOptions.Set(eo); err != nil {
return fmt.Errorf("failed auto-setting extra-config %q: %v", eo, err)
}
ConfDir = CustomConfDir
klog.Infof("extra-config set to %q", eo)
} else {
// respect user-specified custom CNI Config Directory
ConfDir = cc.KubernetesConfig.ExtraOptions.Get("cni-conf-dir", "kubelet")
}
} else {
ConfDir = CustomConfDir
}
// ConfigureLoopbackCNI configures the loopback cni.
// If disable is true, it renames the loopback config file in /etc/cni/net.d by appending the "mk_disabled" extension.
// Otherwise, it ensures the loopback cni has the expected version ("1.0.0") and a valid name ("loopback") in its config file in /etc/cni/net.d.
// Note: cri-o currently leaves the name out (https://github.com/cri-o/cri-o/pull/6273).
// This avoids errors like:
// - Failed to create pod sandbox: rpc error: code = Unknown desc = [failed to set up sandbox container "..." network for pod "...": networkPlugin cni failed to set up pod "..." network: missing network name:,
// - failed to clean up sandbox container "..." network for pod "...": networkPlugin cni failed to teardown pod "..." network: missing network name]
// It is the caller's responsibility to restart the container runtime for these changes to take effect.
func ConfigureLoopbackCNI(r Runner, disable bool) error {
loopback := "/etc/cni/net.d/*loopback.conf*" // usually: 200-loopback.conf
// turn { "cniVersion": "0.3.1", "type": "loopback" }
// into { "cniVersion": "0.3.1", "name": "loopback", "type": "loopback" }
if _, err := r.RunCmd(exec.Command("sh", "-c", fmt.Sprintf("stat %s", loopback))); err != nil {
klog.Warningf("loopback cni configuration skipped: %q not found", loopback)
return nil
}
findExec := []string{"find", filepath.Dir(loopback), "-maxdepth", "1", "-type", "f", "-name", filepath.Base(loopback), "-not", "-name", "*.mk_disabled", "-exec", "sh", "-c"}
if disable {
if _, err := r.RunCmd(exec.Command(
"sudo", append(findExec,
`sudo mv {} {}.mk_disabled`, ";")...)); err != nil {
return fmt.Errorf("unable to disable loopback cni %q: %v", loopback, err)
}
klog.Infof("loopback cni configuration disabled: %q found", loopback)
return nil
}
if _, err := r.RunCmd(exec.Command(
"sudo", append(findExec,
`grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}`, ";")...)); err != nil {
return fmt.Errorf("unable to patch loopback cni config %q: %v", loopback, err)
}
klog.Infof("loopback cni configuration patched: %q found", loopback)
return nil
}
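For readers who want to see the JSON transformation without the sed pipeline, a minimal local sketch; patchLoopback is an illustrative helper, not part of this change:

package main

import (
	"encoding/json"
	"fmt"
)

// patchLoopback mirrors the sed logic above: add a "name" field if missing
// and pin "cniVersion" to 1.0.0 in a loopback CNI config
func patchLoopback(raw []byte) ([]byte, error) {
	var conf map[string]interface{}
	if err := json.Unmarshal(raw, &conf); err != nil {
		return nil, err
	}
	if _, ok := conf["name"]; !ok {
		conf["name"] = "loopback" // cri-o omits the name, but kubelet requires it
	}
	conf["cniVersion"] = "1.0.0"
	return json.MarshalIndent(conf, "", "  ")
}

func main() {
	in := []byte(`{ "cniVersion": "0.3.1", "type": "loopback" }`)
	out, err := patchLoopback(in)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}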
// ConfigureDefaultBridgeCNIs configures all default bridge CNIs on a node (designated by runner).
// If a network plugin is set (eg, "cni" or "kubenet"), it disables all default bridges to avoid conflicts.
// Otherwise, it configures all default bridges to match the DefaultPodCIDR subnet range.
// It is usually called before deploying a new CNI and on node restarts, to avoid conflicts and the flip-flopping of pods' ip addresses.
// It is the caller's responsibility to restart the container runtime for these changes to take effect.
func ConfigureDefaultBridgeCNIs(r Runner, networkPlugin string) error {
if networkPlugin != "" {
return disableAllBridgeCNIs(r)
}
return configureAllBridgeCNIs(r, DefaultPodCIDR)
}
// disableAllBridgeCNIs disables all bridge cnis by appending the "mk_disabled" extension to all *bridge* config file(s) found in the default location (ie, /etc/cni/net.d).
func disableAllBridgeCNIs(r Runner) error {
path := "/etc/cni/net.d"
out, err := r.RunCmd(exec.Command(
// for cri-o, we also disable 87-podman.conflist (that does not have 'bridge' in its name)
"sudo", "find", path, "-maxdepth", "1", "-type", "f", "(", "(", "-name", "*bridge*", "-or", "-name", "*podman*", ")", "-and", "-not", "-name", "*.mk_disabled", ")", "-printf", "%p, ", "-exec", "sh", "-c",
`sudo mv {} {}.mk_disabled`, ";"))
if err != nil {
return fmt.Errorf("failed to disable all bridge cni configs in %q: %v", path, err)
}
configs := strings.Trim(out.Stdout.String(), ", ")
if len(configs) == 0 {
klog.Infof("no active bridge cni configs found in %q - nothing to disable", path)
return nil
}
klog.Infof("disabled [%s] bridge cni config(s)", configs)
return nil
}
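The remote find/mv pair can be pictured as the following local-filesystem sketch; disableBridgeConfigs is hypothetical and acts on the local machine instead of going through a command.Runner:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// disableBridgeConfigs renames matching CNI configs so container runtimes
// stop loading them, mimicking `find ... -exec mv {} {}.mk_disabled`
func disableBridgeConfigs(dir string) ([]string, error) {
	var disabled []string
	for _, pattern := range []string{"*bridge*", "*podman*"} {
		matches, err := filepath.Glob(filepath.Join(dir, pattern))
		if err != nil {
			return nil, err
		}
		for _, m := range matches {
			if strings.HasSuffix(m, ".mk_disabled") {
				continue // already disabled
			}
			if err := os.Rename(m, m+".mk_disabled"); err != nil {
				return nil, err
			}
			disabled = append(disabled, m)
		}
	}
	return disabled, nil
}

func main() {
	configs, err := disableBridgeConfigs("/etc/cni/net.d")
	fmt.Println(configs, err)
}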
// configureAllBridgeCNIs configures all bridge cnis by changing the ip address range to match DefaultPodCIDR in all *bridge* config file(s) found in the default location (ie, /etc/cni/net.d).
// ref: https://github.com/containernetworking/cni/blob/main/libcni/conf.go
// ref: https://kubernetes.io/docs/tasks/administer-cluster/migrating-from-dockershim/troubleshooting-cni-plugin-related-errors/
func configureAllBridgeCNIs(r Runner, cidr string) error {
// non-podman bridge configs:
out, err := r.RunCmd(exec.Command(
"sudo", "find", DefaultConfDir, "-maxdepth", "1", "-type", "f", "-name", "*bridge*", "-not", "-name", "*podman*", "-not", "-name", "*.mk_disabled", "-printf", "%p, ", "-exec", "sh", "-c",
// remove ipv6 entries to avoid "failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
// ref: https://github.com/cri-o/cri-o/issues/3555
// then also remove trailing comma after ipv4 elements, if any
// ie, this will transform, eg:
// from: "ranges": [ [{ "subnet": "10.85.0.0/16" }], [{ "subnet": "1100:200::/24" }] ]
// to: "ranges": [ [{ "subnet": "10.244.0.0/16" }] ]
// getting something similar to https://github.com/cri-o/cri-o/blob/main/contrib/cni/11-crio-ipv4-bridge.conflist
fmt.Sprintf(`sudo sed -i -r -e '/"dst": ".*:.*"/d' -e 's|^(.*)"dst": (.*)[,*]$|\1"dst": \2|g' -e '/"subnet": ".*:.*"/d' -e 's|^(.*)"subnet": ".*"(.*)[,*]$|\1"subnet": "%s"\2|g' {}`, cidr), ";"))
if err != nil {
return fmt.Errorf("failed to configure non-podman bridge cni configs in %q: %v", DefaultConfDir, err)
}
configs := out.Stdout.String()
// podman bridge config(s):
// could be eg, 87-podman-bridge.conflist or 87-podman.conflist
// ref: https://github.com/containers/podman/blob/main/cni/87-podman-bridge.conflist
ip, ipnet, err := net.ParseCIDR(cidr)
if err != nil || ip.To4() == nil {
return fmt.Errorf("cidr %q is not valid ipv4 address: %v", cidr, err)
}
gateway := ip.Mask(ipnet.Mask)
gateway[3]++
out, err = r.RunCmd(exec.Command(
"sudo", "find", DefaultConfDir, "-maxdepth", "1", "-type", "f", "-name", "*podman*", "-not", "-name", "*.mk_disabled", "-printf", "%p, ", "-exec", "sh", "-c",
fmt.Sprintf(`sudo sed -i -r -e 's|^(.*)"subnet": ".*"(.*)$|\1"subnet": "%s"\2|g' -e 's|^(.*)"gateway": ".*"(.*)$|\1"gateway": "%s"\2|g' {}`, cidr, gateway), ";"))
if err != nil {
return fmt.Errorf("failed to configure podman bridge cni configs in %q: %v", DefaultConfDir, err)
}
configs += out.Stdout.String()
configs = strings.Trim(configs, ", ")
if len(configs) == 0 {
klog.Infof("no active bridge cni configs found in %q - nothing to configure", DefaultConfDir)
return nil
}
klog.Infof("configured [%s] bridge cni config(s)", configs)
return nil
}
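The gateway derivation used above (mask the CIDR to its network address, then bump the last octet) in isolation, runnable as-is:

package main

import (
	"fmt"
	"net"
)

func main() {
	// same derivation as above: the network address of the CIDR with the
	// last octet incremented, giving the podman bridge gateway
	ip, ipnet, err := net.ParseCIDR("10.244.0.0/16")
	if err != nil || ip.To4() == nil {
		panic("not a valid ipv4 cidr")
	}
	gateway := ip.Mask(ipnet.Mask).To4()
	gateway[3]++
	fmt.Println(gateway) // prints 10.244.0.1
}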

View File

@ -18,634 +18,25 @@ package cni
import (
"bytes"
"fmt"
_ "embed"
"os/exec"
"path/filepath"
"text/template"
"github.com/blang/semver/v4"
"github.com/pkg/errors"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/util"
)
// From https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
var flannelYaml = `---{{if .LegacyPodSecurityPolicy}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'{{else}}
kind: Namespace
apiVersion: v1
metadata:
name: kube-system
labels:
pod-security.kubernetes.io/enforce: privileged{{end}}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:{{if .LegacyPodSecurityPolicy}}
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']{{end}}
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-amd64
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: kubernetes.io/arch
operator: In
values:
- amd64
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.12.0-amd64
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.12.0-amd64
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-arm64
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: kubernetes.io/arch
operator: In
values:
- arm64
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.12.0-arm64
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.12.0-arm64
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-arm
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: kubernetes.io/arch
operator: In
values:
- arm
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.12.0-arm
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.12.0-arm
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-ppc64le
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: kubernetes.io/arch
operator: In
values:
- ppc64le
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.12.0-ppc64le
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.12.0-ppc64le
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-s390x
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: kubernetes.io/arch
operator: In
values:
- s390x
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.12.0-s390x
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.12.0-s390x
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
`
// ref: https://github.com/flannel-io/flannel#deploying-flannel-manually: "For Kubernetes v1.17+"; multi-arch support
//go:embed flannel.yaml
var flannelYaml string
// https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
var flannelTmpl = template.Must(template.New("flannel").Parse(flannelYaml))
type flannelTmplStruct struct {
LegacyPodSecurityPolicy bool
PodCIDR string
}
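The go:embed pattern replacing the old in-source YAML literal, shown in isolation; it assumes a flannel.yaml file sits next to the source file:

package main

import (
	_ "embed"
	"os"
	"text/template"
)

// the compiler stores flannel.yaml's contents in the string at build time;
// the file must exist in the same directory as this source file
//
//go:embed flannel.yaml
var flannelYaml string

var flannelTmpl = template.Must(template.New("flannel").Parse(flannelYaml))

func main() {
	input := struct{ PodCIDR string }{PodCIDR: "10.244.0.0/16"}
	if err := flannelTmpl.Execute(os.Stdout, input); err != nil {
		panic(err)
	}
}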
// Flannel is the Flannel CNI manager
@ -666,36 +57,23 @@ func (c Flannel) Apply(r Runner) error {
return errors.Wrap(err, "required 'portmap' CNI plug-in not found")
}
if driver.IsKIC(c.cc.Driver) {
conflict := "/etc/cni/net.d/100-crio-bridge.conf"
_, err := r.RunCmd(exec.Command("stat", conflict))
if err != nil {
klog.Warningf("%s not found, skipping disable step: %v", conflict, err)
return nil
}
_, err = r.RunCmd(exec.Command("sudo", "mv", conflict, filepath.Join(filepath.Dir(conflict), "DISABLED-"+filepath.Base(conflict))))
if err != nil {
klog.Errorf("unable to disable %s: %v", conflict, err)
}
}
k8sVersion, err := util.ParseKubernetesVersion(c.cc.KubernetesConfig.KubernetesVersion)
m, err := c.manifest()
if err != nil {
return fmt.Errorf("failed to parse Kubernetes version: %v", err)
return errors.Wrap(err, "manifest")
}
return applyManifest(c.cc, r, m)
}
// manifest returns a Kubernetes manifest for a CNI
func (c Flannel) manifest() (assets.CopyableFile, error) {
input := &flannelTmplStruct{
LegacyPodSecurityPolicy: k8sVersion.LT(semver.Version{Major: 1, Minor: 25}),
PodCIDR: DefaultPodCIDR,
}
b := bytes.Buffer{}
if err := flannelTmpl.Execute(&b, input); err != nil {
return err
return nil, err
}
return applyManifest(c.cc, r, manifestAsset(b.Bytes()))
return manifestAsset(b.Bytes()), nil
}
// CIDR returns the default CIDR used by this CNI

View File

@ -0,0 +1,212 @@
---
kind: Namespace
apiVersion: v1
metadata:
name: kube-flannel
labels:
pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- apiGroups:
- "networking.k8s.io"
resources:
- clustercidrs
verbs:
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-flannel
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "{{ .PodCIDR }}",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-flannel
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni-plugin
#image: flannelcni/flannel-cni-plugin:v1.1.2 #for ppc64le and mips64le (dockerhub limitations may apply)
image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.2
command:
- cp
args:
- -f
- /flannel
- /opt/cni/bin/flannel
volumeMounts:
- name: cni-plugin
mountPath: /opt/cni/bin
- name: install-cni
#image: flannelcni/flannel:v0.20.2 #for ppc64le and mips64le (dockerhub limitations may apply)
image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
#image: flannelcni/flannel:v0.20.2 #for ppc64le and mips64le (dockerhub limitations may apply)
image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: EVENT_QUEUE_DEPTH
value: "5000"
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: xtables-lock
mountPath: /run/xtables.lock
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni-plugin
hostPath:
path: /opt/cni/bin
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate

View File

@ -166,7 +166,7 @@ func (c KindNet) manifest() (assets.CopyableFile, error) {
DefaultRoute: "0.0.0.0/0", // assumes IPv4
PodCIDR: DefaultPodCIDR,
ImageName: images.KindNet(c.cc.KubernetesConfig.ImageRepository),
CNIConfDir: ConfDir,
CNIConfDir: DefaultConfDir,
}
b := bytes.Buffer{}

View File

@ -54,6 +54,7 @@ const (
SSHPort = 22
// RegistryAddonPort is the default registry addon port
RegistryAddonPort = 5000
// Containerd is the default name and spelling for the containerd container runtime
Containerd = "containerd"
// CRIO is the default name and spelling for the cri-o container runtime
@ -63,6 +64,12 @@ const (
// DefaultContainerRuntime is our default container runtime
DefaultContainerRuntime = ""
// cgroup drivers
DefaultCgroupDriver = "systemd"
CgroupfsCgroupDriver = "cgroupfs"
SystemdCgroupDriver = "systemd"
UnknownCgroupDriver = ""
// APIServerName is the default API server name
APIServerName = "minikubeCA"
// ClusterDNSDomain is the default DNS domain

View File

@ -26,6 +26,7 @@ import (
"os"
"os/exec"
"path"
"runtime"
"strings"
"time"
@ -37,6 +38,7 @@ import (
"k8s.io/minikube/pkg/minikube/cni"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/download"
"k8s.io/minikube/pkg/minikube/style"
"k8s.io/minikube/pkg/minikube/sysinit"
@ -127,18 +129,51 @@ func (r *Containerd) Available() error {
}
// generateContainerdConfig sets up /etc/containerd/config.toml & /etc/containerd/containerd.conf.d/02-containerd.conf
func generateContainerdConfig(cr CommandRunner, imageRepository string, kv semver.Version, forceSystemd bool, insecureRegistry []string, inUserNamespace bool) error {
func generateContainerdConfig(cr CommandRunner, imageRepository string, kv semver.Version, cgroupDriver string, insecureRegistry []string, inUserNamespace bool) error {
pauseImage := images.Pause(kv, imageRepository)
if _, err := cr.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo sed -e 's|^.*sandbox_image = .*$|sandbox_image = \"%s\"|' -i %s", pauseImage, containerdConfigFile))); err != nil {
if _, err := cr.RunCmd(exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = %q|' %s`, pauseImage, containerdConfigFile))); err != nil {
return errors.Wrap(err, "update sandbox_image")
}
if _, err := cr.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo sed -e 's|^.*restrict_oom_score_adj = .*$|restrict_oom_score_adj = %t|' -i %s", inUserNamespace, containerdConfigFile))); err != nil {
if _, err := cr.RunCmd(exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = %t|' %s`, inUserNamespace, containerdConfigFile))); err != nil {
return errors.Wrap(err, "update restrict_oom_score_adj")
}
if _, err := cr.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo sed -e 's|^.*SystemdCgroup = .*$|SystemdCgroup = %t|' -i %s", forceSystemd, containerdConfigFile))); err != nil {
return errors.Wrap(err, "update SystemdCgroup")
// configure cgroup driver
if cgroupDriver == constants.UnknownCgroupDriver {
klog.Warningf("unable to configure containerd to use unknown cgroup driver, will use default %q instead", constants.DefaultCgroupDriver)
cgroupDriver = constants.DefaultCgroupDriver
}
if _, err := cr.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo sed -e 's|^.*conf_dir = .*$|conf_dir = \"%s\"|' -i %s", cni.ConfDir, containerdConfigFile))); err != nil {
klog.Infof("configuring containerd to use %q as cgroup driver...", cgroupDriver)
useSystemd := cgroupDriver == constants.SystemdCgroupDriver
if _, err := cr.RunCmd(exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = %t|g' %s`, useSystemd, containerdConfigFile))); err != nil {
return errors.Wrap(err, "configuring SystemdCgroup")
}
// handle deprecated/removed features
// ref: https://github.com/containerd/containerd/blob/main/RELEASES.md#deprecated-features
if _, err := cr.RunCmd(exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' %s`, containerdConfigFile))); err != nil {
return errors.Wrap(err, "configuring io.containerd.runtime version")
}
// avoid containerd v1.6.14+ "failed to load plugin io.containerd.grpc.v1.cri" error="invalid plugin config: `systemd_cgroup` only works for runtime io.containerd.runtime.v1.linux" error
// that then leads to crictl "getting the runtime version: rpc error: code = Unimplemented desc = unknown service runtime.v1alpha2.RuntimeService" error
// ref: https://github.com/containerd/containerd/issues/4203
if _, err := cr.RunCmd(exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i '/systemd_cgroup/d' %s`, containerdConfigFile))); err != nil {
return errors.Wrap(err, "removing deprecated systemd_cgroup param")
}
// "runtime_type" has to be specified and it should be "io.containerd.runc.v2"
// ref: https://github.com/containerd/containerd/issues/6964#issuecomment-1132378279
if _, err := cr.RunCmd(exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' %s`, containerdConfigFile))); err != nil {
return errors.Wrap(err, "configuring io.containerd.runc version")
}
// ensure conf_dir is using '/etc/cni/net.d'
// we might still want to try removing '/etc/cni/net.mk' in case of an upgrade from a previous minikube version that had/used it
if _, err := cr.RunCmd(exec.Command("sh", "-c", `sudo rm -rf /etc/cni/net.mk`)); err != nil {
klog.Warningf("unable to remove /etc/cni/net.mk directory: %v", err)
}
if _, err := cr.RunCmd(exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = %q|g' %s`, cni.DefaultConfDir, containerdConfigFile))); err != nil {
return errors.Wrap(err, "update conf_dir")
}
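The repeated `sed -i -r 's|^( *)Key = .*$|\1Key = ...|'` calls preserve TOML indentation through the captured group. The same indentation-preserving rewrite expressed in Go, purely for illustration:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	conf := "  [plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.runc.options]\n    SystemdCgroup = false\n"
	// (?m) makes ^ and $ match per line; ${1} re-emits the captured indent
	re := regexp.MustCompile(`(?m)^( *)SystemdCgroup = .*$`)
	fmt.Print(re.ReplaceAllString(conf, "${1}SystemdCgroup = true"))
}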
@ -175,7 +210,8 @@ func generateContainerdConfig(cr CommandRunner, imageRepository string, kv semve
}
// Enable idempotently enables containerd on a host
func (r *Containerd) Enable(disOthers, forceSystemd, inUserNamespace bool) error {
// It is also called by docker.Enable() - if bound to containerd, to enforce proper containerd configuration completed by service restart.
func (r *Containerd) Enable(disOthers bool, cgroupDriver string, inUserNamespace bool) error {
if inUserNamespace {
if err := CheckKernelCompatibility(r.Runner, 5, 11); err != nil {
// For using overlayfs
@ -194,13 +230,26 @@ func (r *Containerd) Enable(disOthers, forceSystemd, inUserNamespace bool) error
if err := populateCRIConfig(r.Runner, r.SocketPath()); err != nil {
return err
}
if err := generateContainerdConfig(r.Runner, r.ImageRepository, r.KubernetesVersion, forceSystemd, r.InsecureRegistry, inUserNamespace); err != nil {
if err := generateContainerdConfig(r.Runner, r.ImageRepository, r.KubernetesVersion, cgroupDriver, r.InsecureRegistry, inUserNamespace); err != nil {
return err
}
if err := enableIPForwarding(r.Runner); err != nil {
return err
}
// TODO (@prezha): remove this hack after proper version update in minikube release
// ref: https://github.com/containerd/containerd/blob/main/RELEASES.md#kubernetes-support
targetVersion := "1.6.15"
currentVersion, err := r.Version()
if err == nil && semver.MustParse(targetVersion).GT(semver.MustParse(currentVersion)) {
klog.Infof("replacing original containerd with v%s-linux-%s", targetVersion, runtime.GOARCH)
_ = r.Init.ForceStop("containerd")
if err := updateContainerdBinary(r.Runner, targetVersion, runtime.GOARCH); err != nil {
klog.Warningf("unable to replace original containerd with v%s-linux-%s: %v", targetVersion, runtime.GOARCH, err)
}
}
// Otherwise, containerd will fail API requests with 'Unimplemented'
return r.Init.Restart("containerd")
}
@ -387,21 +436,41 @@ func (r *Containerd) CGroupDriver() (string, error) {
if err != nil {
return "", err
}
if info["config"] == nil {
return "", errors.Wrapf(err, "missing config")
// crictl also returns the default ('false') value for "systemdCgroup" - the deprecated "systemd_cgroup" config param that is now irrelevant
// ref: https://github.com/containerd/containerd/blob/5e7baa2eb3dab4c4365dd63c05ed8b3fa94b9271/pkg/cri/config/config.go#L277-L280
// ref: https://github.com/containerd/containerd/issues/4574#issuecomment-1298727099
// so, instead, we extract runc's "SystemdCgroup" option - the one we actually care about
// ref: https://github.com/containerd/containerd/issues/4203#issuecomment-651532765
j, err := json.Marshal(info)
if err != nil {
return "", fmt.Errorf("marshalling: %v", err)
}
config, ok := info["config"].(map[string]interface{})
if !ok {
return "", errors.Wrapf(err, "config not map")
s := struct {
Config struct {
Containerd struct {
Runtimes struct {
Runc struct {
Options struct {
SystemdCgroup bool `json:"SystemdCgroup"`
} `json:"options"`
} `json:"runc"`
} `json:"runtimes"`
} `json:"containerd"`
} `json:"config"`
}{}
if err := json.Unmarshal(j, &s); err != nil {
return "", fmt.Errorf("unmarshalling: %v", err)
}
cgroupManager := "cgroupfs" // default
switch config["systemdCgroup"] {
case false:
cgroupManager = "cgroupfs"
// note: if "path" does not exists, SystemdCgroup will evaluate to false as 'default' value for bool => constants.CgroupfsCgroupDriver
switch s.Config.Containerd.Runtimes.Runc.Options.SystemdCgroup {
case true:
cgroupManager = "systemd"
return constants.SystemdCgroupDriver, nil
case false:
return constants.CgroupfsCgroupDriver, nil
default:
return constants.DefaultCgroupDriver, nil
}
return cgroupManager, nil
}
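What the anonymous struct digs out of `crictl info`, shown against a trimmed sample of that JSON; the sample illustrates the shape only, it is not a verbatim capture:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// trimmed-down shape of `crictl info` output
	raw := []byte(`{"config":{"containerd":{"runtimes":{"runc":{"options":{"SystemdCgroup":true}}}}}}`)
	s := struct {
		Config struct {
			Containerd struct {
				Runtimes struct {
					Runc struct {
						Options struct {
							SystemdCgroup bool `json:"SystemdCgroup"`
						} `json:"options"`
					} `json:"runc"`
				} `json:"runtimes"`
			} `json:"containerd"`
		} `json:"config"`
	}{}
	if err := json.Unmarshal(raw, &s); err != nil {
		panic(err)
	}
	fmt.Println("systemd cgroup driver:", s.Config.Containerd.Runtimes.Runc.Options.SystemdCgroup)
}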
// KubeletOptions returns kubelet options for a containerd
@ -410,7 +479,6 @@ func (r *Containerd) KubeletOptions() map[string]string {
"container-runtime": "remote",
"container-runtime-endpoint": fmt.Sprintf("unix://%s", r.SocketPath()),
"image-service-endpoint": fmt.Sprintf("unix://%s", r.SocketPath()),
"runtime-request-timeout": "15m",
}
}
@ -500,7 +568,7 @@ func (r *Containerd) Preload(cc config.ClusterConfig) error {
if rr, err := r.Runner.RunCmd(exec.Command("sudo", "tar", "-I", "lz4", "-C", "/var", "-xf", dest)); err != nil {
return errors.Wrapf(err, "extracting tarball: %s", rr.Output())
}
klog.Infof("Took %f seconds t extract the tarball", time.Since(t).Seconds())
klog.Infof("Took %f seconds to extract the tarball", time.Since(t).Seconds())
// remove the tarball in the VM
if err := r.Runner.Remove(fa); err != nil {
@ -510,7 +578,7 @@ func (r *Containerd) Preload(cc config.ClusterConfig) error {
return r.Restart()
}
// Restart restarts Docker on a host
// Restart restarts this container runtime on a host
func (r *Containerd) Restart() error {
return r.Init.Restart("containerd")
}

View File

@ -190,7 +190,7 @@ func killCRIContainers(cr CommandRunner, ids []string) error {
klog.Infof("Killing containers: %s", ids)
crictl := getCrictlPath(cr)
args := append([]string{crictl, "rm"}, ids...)
args := append([]string{crictl, "rm", "--force"}, ids...)
c := exec.Command("sudo", args...)
if _, err := cr.RunCmd(c); err != nil {
return errors.Wrap(err, "crictl")
@ -232,7 +232,11 @@ func stopCRIContainers(cr CommandRunner, ids []string) error {
klog.Infof("Stopping containers: %s", ids)
crictl := getCrictlPath(cr)
args := append([]string{crictl, "stop"}, ids...)
// bring crictl stop timeout on par with docker:
// - docker stop --help => -t, --time int Seconds to wait for stop before killing it (default 10)
// - crictl stop --help => --timeout value, -t value Seconds to wait to kill the container after a graceful stop is requested (default: 0)
// to prevent "stuck" containers blocking ports (eg, "[ERROR Port-2379|2380]: Port 2379|2380 is in use" for etcd during "hot" k8s upgrade)
args := append([]string{crictl, "stop", "--timeout=10"}, ids...)
c := exec.Command("sudo", args...)
if _, err := cr.RunCmd(c); err != nil {
return errors.Wrap(err, "crictl")

View File

@ -31,16 +31,16 @@ import (
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/cni"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/download"
"k8s.io/minikube/pkg/minikube/style"
"k8s.io/minikube/pkg/minikube/sysinit"
)
const (
// CRIOConfFile is the path to the CRI-O configuration
// crioConfigFile is the path to the CRI-O configuration
crioConfigFile = "/etc/crio/crio.conf.d/02-crio.conf"
)
@ -53,29 +53,40 @@ type CRIO struct {
Init sysinit.Manager
}
// generateCRIOConfig sets up /etc/crio/crio.conf
func generateCRIOConfig(cr CommandRunner, imageRepository string, kv semver.Version) error {
// generateCRIOConfig sets up the pause image and cgroup manager for cri-o in crioConfigFile
func generateCRIOConfig(cr CommandRunner, imageRepository string, kv semver.Version, cgroupDriver string) error {
pauseImage := images.Pause(kv, imageRepository)
c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo sed -e 's|^.*pause_image = .*$|pause_image = \"%s\"|' -i %s", pauseImage, crioConfigFile))
klog.Infof("configure cri-o to use %q pause image...", pauseImage)
c := exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i 's|^.*pause_image = .*$|pause_image = %q|' %s`, pauseImage, crioConfigFile))
if _, err := cr.RunCmd(c); err != nil {
return errors.Wrap(err, "generateCRIOConfig")
return errors.Wrap(err, "update pause_image")
}
if cni.Network != "" {
klog.Infof("Updating CRIO to use the custom CNI network %q", cni.Network)
if _, err := cr.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo sed -e 's|^.*cni_default_network = .*$|cni_default_network = \"%s\"|' -i %s", cni.Network, crioConfigFile))); err != nil {
return errors.Wrap(err, "update network_dir")
}
// configure cgroup driver
if cgroupDriver == constants.UnknownCgroupDriver {
klog.Warningf("unable to configure cri-o to use unknown cgroup driver, will use default %q instead", constants.DefaultCgroupDriver)
cgroupDriver = constants.DefaultCgroupDriver
}
klog.Infof("configuring cri-o to use %q as cgroup driver...", cgroupDriver)
if _, err := cr.RunCmd(exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i 's|^.*cgroup_manager = .*$|cgroup_manager = %q|' %s`, cgroupDriver, crioConfigFile))); err != nil {
return errors.Wrap(err, "configuring cgroup_manager")
}
// explicitly set conmon_cgroup to avoid errors like:
// - level=fatal msg="Validating runtime config: conmon cgroup should be 'pod' or a systemd slice"
// - level=fatal msg="Validating runtime config: cgroupfs manager conmon cgroup should be 'pod' or empty"
// ref: https://github.com/cri-o/cri-o/pull/3940
// ref: https://github.com/cri-o/cri-o/issues/6047
// ref: https://kubernetes.io/docs/setup/production-environment/container-runtimes/#cgroup-driver
if _, err := cr.RunCmd(exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i '/conmon_cgroup = .*/d' %s`, crioConfigFile))); err != nil {
return errors.Wrap(err, "removing conmon_cgroup")
}
if _, err := cr.RunCmd(exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i '/cgroup_manager = .*/a conmon_cgroup = %q' %s`, "pod", crioConfigFile))); err != nil {
return errors.Wrap(err, "configuring conmon_cgroup")
}
return nil
}
func (r *CRIO) forceSystemd() error {
c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo sed -e 's|^.*cgroup_manager = .*$|cgroup_manager = \"systemd\"|' -i %s", crioConfigFile))
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "force systemd")
// we might still want to try removing '/etc/cni/net.mk' in case of an upgrade from a previous minikube version that had/used it
if _, err := cr.RunCmd(exec.Command("sh", "-c", `sudo rm -rf /etc/cni/net.mk`)); err != nil {
klog.Warningf("unable to remove /etc/cni/net.mk directory: %v", err)
}
return nil
@ -185,7 +196,7 @@ Environment="_CRIO_ROOTLESS=1"
}
// Enable idempotently enables CRIO on a host
func (r *CRIO) Enable(disOthers, forceSystemd, inUserNamespace bool) error {
func (r *CRIO) Enable(disOthers bool, cgroupDriver string, inUserNamespace bool) error {
if disOthers {
if err := disableOthers(r, r.Runner); err != nil {
klog.Warningf("disableOthers: %v", err)
@ -194,17 +205,12 @@ func (r *CRIO) Enable(disOthers, forceSystemd, inUserNamespace bool) error {
if err := populateCRIConfig(r.Runner, r.SocketPath()); err != nil {
return err
}
if err := generateCRIOConfig(r.Runner, r.ImageRepository, r.KubernetesVersion); err != nil {
if err := generateCRIOConfig(r.Runner, r.ImageRepository, r.KubernetesVersion, cgroupDriver); err != nil {
return err
}
if err := enableIPForwarding(r.Runner); err != nil {
return err
}
if forceSystemd {
if err := r.forceSystemd(); err != nil {
return err
}
}
if inUserNamespace {
if err := CheckKernelCompatibility(r.Runner, 5, 11); err != nil {
// For using overlayfs
@ -219,7 +225,7 @@ func (r *CRIO) Enable(disOthers, forceSystemd, inUserNamespace bool) error {
}
}
// NOTE: crio might already have been started automatically before we explicitly restart it here
return r.Init.Start("crio")
return r.Init.Restart("crio")
}
// Disable idempotently disables CRIO on a host
@ -356,7 +362,6 @@ func (r *CRIO) KubeletOptions() map[string]string {
"container-runtime": "remote",
"container-runtime-endpoint": r.SocketPath(),
"image-service-endpoint": r.SocketPath(),
"runtime-request-timeout": "15m",
}
}
@ -446,7 +451,7 @@ func (r *CRIO) Preload(cc config.ClusterConfig) error {
if rr, err := r.Runner.RunCmd(exec.Command("sudo", "tar", "-I", "lz4", "-C", "/var", "-xf", dest)); err != nil {
return errors.Wrapf(err, "extracting tarball: %s", rr.Output())
}
klog.Infof("Took %f seconds t extract the tarball", time.Since(t).Seconds())
klog.Infof("Took %f seconds to extract the tarball", time.Since(t).Seconds())
// remove the tarball in the VM
if err := r.Runner.Remove(fa); err != nil {

View File

@ -81,7 +81,7 @@ type Manager interface {
// Version retrieves the current version of this runtime
Version() (string, error)
// Enable idempotently enables this runtime on a host
Enable(bool, bool, bool) error
Enable(bool, string, bool) error
// Disable idempotently disables this runtime on a host
Disable() error
// Active returns whether or not a runtime is active on a host
@ -345,9 +345,35 @@ func ConfigureNetworkPlugin(r Manager, cr CommandRunner, networkPlugin string) e
}
return nil
}
dm, ok := r.(*Docker)
if !ok {
return fmt.Errorf("name and type mismatch")
}
return dockerConfigureNetworkPlugin(*dm, cr, networkPlugin)
return dockerConfigureNetworkPlugin(cr, networkPlugin)
}
// updateCRIDockerdBinary updates cri-dockerd to version
func updateCRIDockerdBinary(cr CommandRunner, version, arch string) error {
curl := fmt.Sprintf("curl -sSfL https://github.com/Mirantis/cri-dockerd/releases/download/v%s/cri-dockerd-%s.%s.tgz | tar -xz -C /tmp", version, version, arch)
if _, err := cr.RunCmd(exec.Command("sudo", "sh", "-c", curl)); err != nil {
return fmt.Errorf("unable to download cri-dockerd version %s: %v", version, err)
}
if _, err := cr.RunCmd(exec.Command("sudo", "chmod", "a+x", "/tmp/cri-dockerd/cri-dockerd")); err != nil {
return fmt.Errorf("unable to chmod cri-dockerd version %s: %v", version, err)
}
if _, err := cr.RunCmd(exec.Command("sudo", "mv", "/tmp/cri-dockerd/cri-dockerd", "/usr/bin/cri-dockerd")); err != nil {
return fmt.Errorf("unable to install cri-dockerd version %s: %v", version, err)
}
return nil
}
// updateContainerdBinary updates containerd to version
func updateContainerdBinary(cr CommandRunner, version, arch string) error {
curl := fmt.Sprintf("curl -sSfL https://github.com/containerd/containerd/releases/download/v%s/containerd-%s-linux-%s.tar.gz | tar -xz -C /tmp", version, version, arch)
if _, err := cr.RunCmd(exec.Command("sudo", "sh", "-c", curl)); err != nil {
return fmt.Errorf("unable to download containerd version %s: %v", version, err)
}
if _, err := cr.RunCmd(exec.Command("sudo", "sh", "-c", "chmod a+x /tmp/bin/*")); err != nil { // note: has to run in subshell because of wildcard!
return fmt.Errorf("unable to chmod containerd version %s: %v", version, err)
}
if _, err := cr.RunCmd(exec.Command("sudo", "sh", "-c", "mv /tmp/bin/* /usr/bin/")); err != nil { // note: has to run in subshell because of wildcard!
return fmt.Errorf("unable to install containerd version %s: %v", version, err)
}
return nil
}
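The "has to run in subshell" notes matter because exec.Command does not invoke a shell, so a glob like /tmp/bin/* is passed to mv literally. A minimal demonstration of the difference, using the same placeholder paths:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// without a shell, mv receives the literal string "/tmp/bin/*" and fails
	// unless a file with that exact name exists
	if out, err := exec.Command("mv", "/tmp/bin/*", "/usr/bin/").CombinedOutput(); err != nil {
		fmt.Printf("literal glob: %v: %s\n", err, out)
	}
	// wrapping the command in `sh -c` lets the shell expand the wildcard first
	if out, err := exec.Command("sh", "-c", "mv /tmp/bin/* /usr/bin/").CombinedOutput(); err != nil {
		fmt.Printf("shell glob: %v: %s\n", err, out)
	}
}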

Some files were not shown because too many files have changed in this diff