diff --git a/cmd/minikube/cmd/docker-env.go b/cmd/minikube/cmd/docker-env.go index bebbb7f5e6..b8b96333e1 100644 --- a/cmd/minikube/cmd/docker-env.go +++ b/cmd/minikube/cmd/docker-env.go @@ -199,7 +199,7 @@ func mustRestartDockerd(name string, runner command.Runner) { if err := sysinit.New(runner).Reload("docker"); err != nil { klog.Warningf("will try to restart dockerd because reload failed: %v", err) if err := sysinit.New(runner).Restart("docker"); err != nil { - klog.Warningf("Couldn't restart docker inside minikbue within '%v' because: %v", name, err) + klog.Warningf("Couldn't restart docker inside minikube within '%v' because: %v", name, err) return } // if we get to the point that we have to restart docker (instead of reload) diff --git a/deploy/iso/minikube-iso/arch/aarch64/package/containerd-bin-aarch64/config.toml b/deploy/iso/minikube-iso/arch/aarch64/package/containerd-bin-aarch64/config.toml index d5de73eae4..12b0d42eb7 100644 --- a/deploy/iso/minikube-iso/arch/aarch64/package/containerd-bin-aarch64/config.toml +++ b/deploy/iso/minikube-iso/arch/aarch64/package/containerd-bin-aarch64/config.toml @@ -40,8 +40,7 @@ oom_score = 0 [plugins."io.containerd.grpc.v1.cri".containerd] discard_unpacked_layers = true snapshotter = "overlayfs" - [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime] - runtime_type = "io.containerd.runc.v2" + default_runtime_name = "runc" [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] runtime_type = "" runtime_engine = "" @@ -54,7 +53,7 @@ oom_score = 0 [plugins."io.containerd.grpc.v1.cri".cni] bin_dir = "/opt/cni/bin" - conf_dir = "/etc/cni/net.mk" + conf_dir = "/etc/cni/net.d" conf_template = "" [plugins."io.containerd.grpc.v1.cri".registry] config_path = "/etc/containerd/certs.d" diff --git a/deploy/iso/minikube-iso/arch/aarch64/package/containerd-bin-aarch64/config.toml.default b/deploy/iso/minikube-iso/arch/aarch64/package/containerd-bin-aarch64/config.toml.default deleted file mode 100644 index 54a396a435..0000000000 --- a/deploy/iso/minikube-iso/arch/aarch64/package/containerd-bin-aarch64/config.toml.default +++ /dev/null @@ -1,131 +0,0 @@ -version = 2 -root = "/var/lib/containerd" -state = "/run/containerd" -plugin_dir = "" -disabled_plugins = [] -required_plugins = [] -oom_score = 0 - -[grpc] - address = "/run/containerd/containerd.sock" - tcp_address = "" - tcp_tls_cert = "" - tcp_tls_key = "" - uid = 0 - gid = 0 - max_recv_message_size = 16777216 - max_send_message_size = 16777216 - -[ttrpc] - address = "" - uid = 0 - gid = 0 - -[debug] - address = "" - uid = 0 - gid = 0 - level = "" - -[metrics] - address = "" - grpc_histogram = false - -[cgroup] - path = "" - -[timeouts] - "io.containerd.timeout.shim.cleanup" = "5s" - "io.containerd.timeout.shim.load" = "5s" - "io.containerd.timeout.shim.shutdown" = "3s" - "io.containerd.timeout.task.state" = "2s" - -[plugins] - [plugins."io.containerd.gc.v1.scheduler"] - pause_threshold = 0.02 - deletion_threshold = 0 - mutation_threshold = 100 - schedule_delay = "0s" - startup_delay = "100ms" - [plugins."io.containerd.grpc.v1.cri"] - disable_tcp_service = true - stream_server_address = "127.0.0.1" - stream_server_port = "0" - stream_idle_timeout = "4h0m0s" - enable_selinux = false - selinux_category_range = 1024 - sandbox_image = "k8s.gcr.io/pause:3.2" - stats_collect_period = 10 - systemd_cgroup = false - enable_tls_streaming = false - max_container_log_line_size = 16384 - disable_cgroup = false - disable_apparmor = false - restrict_oom_score_adj = false - 
max_concurrent_downloads = 3 - disable_proc_mount = false - unset_seccomp_profile = "" - tolerate_missing_hugetlb_controller = true - disable_hugetlb_controller = true - ignore_image_defined_volumes = false - [plugins."io.containerd.grpc.v1.cri".containerd] - snapshotter = "overlayfs" - default_runtime_name = "runc" - no_pivot = false - disable_snapshot_annotations = true - discard_unpacked_layers = false - [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime] - runtime_type = "" - runtime_engine = "" - runtime_root = "" - privileged_without_host_devices = false - base_runtime_spec = "" - [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] - runtime_type = "" - runtime_engine = "" - runtime_root = "" - privileged_without_host_devices = false - base_runtime_spec = "" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - runtime_engine = "" - runtime_root = "" - privileged_without_host_devices = false - base_runtime_spec = "" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - [plugins."io.containerd.grpc.v1.cri".cni] - bin_dir = "/opt/cni/bin" - conf_dir = "/etc/cni/net.d" - max_conf_num = 1 - conf_template = "" - [plugins."io.containerd.grpc.v1.cri".registry] - config_path = "/etc/containerd/certs.d" - [plugins."io.containerd.grpc.v1.cri".image_decryption] - key_model = "" - [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming] - tls_cert_file = "" - tls_key_file = "" - [plugins."io.containerd.internal.v1.opt"] - path = "/opt/containerd" - [plugins."io.containerd.internal.v1.restart"] - interval = "10s" - [plugins."io.containerd.metadata.v1.bolt"] - content_sharing_policy = "shared" - [plugins."io.containerd.monitor.v1.cgroups"] - no_prometheus = false - [plugins."io.containerd.runtime.v1.linux"] - shim = "containerd-shim" - runtime = "runc" - runtime_root = "" - no_shim = false - shim_debug = false - [plugins."io.containerd.runtime.v2.task"] - platforms = ["linux/amd64"] - [plugins."io.containerd.service.v1.diff-service"] - default = ["walking"] - [plugins."io.containerd.snapshotter.v1.devmapper"] - root_path = "" - pool_name = "" - base_image_size = "" - async_remove = false diff --git a/deploy/iso/minikube-iso/arch/aarch64/package/containerd-bin-aarch64/containerd-bin.hash b/deploy/iso/minikube-iso/arch/aarch64/package/containerd-bin-aarch64/containerd-bin.hash index 9894c41be2..e87a009097 100644 --- a/deploy/iso/minikube-iso/arch/aarch64/package/containerd-bin-aarch64/containerd-bin.hash +++ b/deploy/iso/minikube-iso/arch/aarch64/package/containerd-bin-aarch64/containerd-bin.hash @@ -15,6 +15,6 @@ sha256 85a531725f15e2d136131119d42af4507a5389e0947015152075c4c93816fb5c v1.4.12. 
sha256 7507913ba169c103ab67bc51bec31cd977d4348d7bc842da32b7eab5f930a14b v1.5.10.tar.gz sha256 02b79d5e2b07b5e64cd28f1fe84395ee11eef95fc49fd923a9ab93022b148be6 v1.5.11.tar.gz sha256 f422e21e35705d1e741c1f3280813e43f811eaff4dcc5cdafac8b8952b15f468 v1.6.4.tar.gz -sha265 27afb673c20d53aa5c31aec07b38eb7e4dc911e7e1f0c76fac9513bbf070bd24 v1.6.6.tar.gz +sha256 27afb673c20d53aa5c31aec07b38eb7e4dc911e7e1f0c76fac9513bbf070bd24 v1.6.6.tar.gz sha256 f5f938513c28377f64f85e84f2750d39f26b01262f3a062b7e8ce35b560ca407 v1.6.8.tar.gz sha256 a034b2273533207d5d96bef8bd3fce1efff85139815efb756d90c705ae1a05ce v1.6.9.tar.gz diff --git a/deploy/iso/minikube-iso/arch/aarch64/package/containerd-bin-aarch64/containerd-bin.mk b/deploy/iso/minikube-iso/arch/aarch64/package/containerd-bin-aarch64/containerd-bin.mk index 9bf242ea20..b7bd04b523 100644 --- a/deploy/iso/minikube-iso/arch/aarch64/package/containerd-bin-aarch64/containerd-bin.mk +++ b/deploy/iso/minikube-iso/arch/aarch64/package/containerd-bin-aarch64/containerd-bin.mk @@ -14,6 +14,7 @@ CONTAINERD_BIN_AARCH64_ENV = \ CGO_ENABLED=1 \ GO111MODULE=off \ GOPATH="$(CONTAINERD_BIN_AARCH64_GOPATH)" \ + GOBIN="$(CONTAINERD_BIN_AARCH64_GOPATH)/bin" \ PATH=$(CONTAINERD_BIN_AARCH64_GOPATH)/bin:$(BR_PATH) \ GOARCH=arm64 diff --git a/deploy/iso/minikube-iso/arch/aarch64/package/containerd-bin-aarch64/containerd.service b/deploy/iso/minikube-iso/arch/aarch64/package/containerd-bin-aarch64/containerd.service index 2d3d1e5ec9..282f34725b 100644 --- a/deploy/iso/minikube-iso/arch/aarch64/package/containerd-bin-aarch64/containerd.service +++ b/deploy/iso/minikube-iso/arch/aarch64/package/containerd-bin-aarch64/containerd.service @@ -15,8 +15,9 @@ ExecStart=/usr/bin/containerd \ $CONTAINERD_MINIKUBE_OPTIONS \ --root ${PERSISTENT_DIR}/var/lib/containerd TasksMax=8192 +# ref: https://github.com/containerd/containerd/blob/main/docs/ops.md#systemd Delegate=yes -KillMode=mixed +KillMode=process LimitNOFILE=1048576 # Having non-zero Limit*s causes performance problems due to accounting overhead # in the kernel. We recommend using cgroups to do container-local accounting. 
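For context on the containerd-bin.hash fix above: buildroot verifies each downloaded source tarball against the sha256 entries in the package .hash file, so the misspelled "sha265" algorithm name meant the v1.6.6 tarball entry could not be matched correctly. A minimal Go sketch of the equivalent check (not part of this patch; the local tarball path is hypothetical):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

// verifySHA256 streams the file at path through SHA-256 and compares
// the digest against the expected hex string from the .hash file.
func verifySHA256(path, wantHex string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return err
	}
	if got := hex.EncodeToString(h.Sum(nil)); got != wantHex {
		return fmt.Errorf("sha256 mismatch: got %s, want %s", got, wantHex)
	}
	return nil
}

func main() {
	// v1.6.6.tar.gz is a hypothetical local copy of the containerd source tarball.
	if err := verifySHA256("v1.6.6.tar.gz", "27afb673c20d53aa5c31aec07b38eb7e4dc911e7e1f0c76fac9513bbf070bd24"); err != nil {
		fmt.Println(err)
	}
}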
diff --git a/deploy/iso/minikube-iso/arch/x86_64/package/containerd-bin/config.toml b/deploy/iso/minikube-iso/arch/x86_64/package/containerd-bin/config.toml index e63ad23c34..12b0d42eb7 100644 --- a/deploy/iso/minikube-iso/arch/x86_64/package/containerd-bin/config.toml +++ b/deploy/iso/minikube-iso/arch/x86_64/package/containerd-bin/config.toml @@ -40,8 +40,7 @@ oom_score = 0 [plugins."io.containerd.grpc.v1.cri".containerd] discard_unpacked_layers = true snapshotter = "overlayfs" - [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime] - runtime_type = "io.containerd.runc.v2" + default_runtime_name = "runc" [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] runtime_type = "" runtime_engine = "" @@ -54,11 +53,11 @@ oom_score = 0 [plugins."io.containerd.grpc.v1.cri".cni] bin_dir = "/opt/cni/bin" - conf_dir = "/etc/cni/net.mk" + conf_dir = "/etc/cni/net.d" conf_template = "" [plugins."io.containerd.grpc.v1.cri".registry] config_path = "/etc/containerd/certs.d" - + [plugins."io.containerd.service.v1.diff-service"] default = ["walking"] [plugins."io.containerd.gc.v1.scheduler"] diff --git a/deploy/iso/minikube-iso/arch/x86_64/package/containerd-bin/config.toml.default b/deploy/iso/minikube-iso/arch/x86_64/package/containerd-bin/config.toml.default deleted file mode 100644 index 54a396a435..0000000000 --- a/deploy/iso/minikube-iso/arch/x86_64/package/containerd-bin/config.toml.default +++ /dev/null @@ -1,131 +0,0 @@ -version = 2 -root = "/var/lib/containerd" -state = "/run/containerd" -plugin_dir = "" -disabled_plugins = [] -required_plugins = [] -oom_score = 0 - -[grpc] - address = "/run/containerd/containerd.sock" - tcp_address = "" - tcp_tls_cert = "" - tcp_tls_key = "" - uid = 0 - gid = 0 - max_recv_message_size = 16777216 - max_send_message_size = 16777216 - -[ttrpc] - address = "" - uid = 0 - gid = 0 - -[debug] - address = "" - uid = 0 - gid = 0 - level = "" - -[metrics] - address = "" - grpc_histogram = false - -[cgroup] - path = "" - -[timeouts] - "io.containerd.timeout.shim.cleanup" = "5s" - "io.containerd.timeout.shim.load" = "5s" - "io.containerd.timeout.shim.shutdown" = "3s" - "io.containerd.timeout.task.state" = "2s" - -[plugins] - [plugins."io.containerd.gc.v1.scheduler"] - pause_threshold = 0.02 - deletion_threshold = 0 - mutation_threshold = 100 - schedule_delay = "0s" - startup_delay = "100ms" - [plugins."io.containerd.grpc.v1.cri"] - disable_tcp_service = true - stream_server_address = "127.0.0.1" - stream_server_port = "0" - stream_idle_timeout = "4h0m0s" - enable_selinux = false - selinux_category_range = 1024 - sandbox_image = "k8s.gcr.io/pause:3.2" - stats_collect_period = 10 - systemd_cgroup = false - enable_tls_streaming = false - max_container_log_line_size = 16384 - disable_cgroup = false - disable_apparmor = false - restrict_oom_score_adj = false - max_concurrent_downloads = 3 - disable_proc_mount = false - unset_seccomp_profile = "" - tolerate_missing_hugetlb_controller = true - disable_hugetlb_controller = true - ignore_image_defined_volumes = false - [plugins."io.containerd.grpc.v1.cri".containerd] - snapshotter = "overlayfs" - default_runtime_name = "runc" - no_pivot = false - disable_snapshot_annotations = true - discard_unpacked_layers = false - [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime] - runtime_type = "" - runtime_engine = "" - runtime_root = "" - privileged_without_host_devices = false - base_runtime_spec = "" - [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] - runtime_type = 
"" - runtime_engine = "" - runtime_root = "" - privileged_without_host_devices = false - base_runtime_spec = "" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - runtime_engine = "" - runtime_root = "" - privileged_without_host_devices = false - base_runtime_spec = "" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - [plugins."io.containerd.grpc.v1.cri".cni] - bin_dir = "/opt/cni/bin" - conf_dir = "/etc/cni/net.d" - max_conf_num = 1 - conf_template = "" - [plugins."io.containerd.grpc.v1.cri".registry] - config_path = "/etc/containerd/certs.d" - [plugins."io.containerd.grpc.v1.cri".image_decryption] - key_model = "" - [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming] - tls_cert_file = "" - tls_key_file = "" - [plugins."io.containerd.internal.v1.opt"] - path = "/opt/containerd" - [plugins."io.containerd.internal.v1.restart"] - interval = "10s" - [plugins."io.containerd.metadata.v1.bolt"] - content_sharing_policy = "shared" - [plugins."io.containerd.monitor.v1.cgroups"] - no_prometheus = false - [plugins."io.containerd.runtime.v1.linux"] - shim = "containerd-shim" - runtime = "runc" - runtime_root = "" - no_shim = false - shim_debug = false - [plugins."io.containerd.runtime.v2.task"] - platforms = ["linux/amd64"] - [plugins."io.containerd.service.v1.diff-service"] - default = ["walking"] - [plugins."io.containerd.snapshotter.v1.devmapper"] - root_path = "" - pool_name = "" - base_image_size = "" - async_remove = false diff --git a/deploy/iso/minikube-iso/arch/x86_64/package/containerd-bin/containerd.service b/deploy/iso/minikube-iso/arch/x86_64/package/containerd-bin/containerd.service index 2d3d1e5ec9..282f34725b 100644 --- a/deploy/iso/minikube-iso/arch/x86_64/package/containerd-bin/containerd.service +++ b/deploy/iso/minikube-iso/arch/x86_64/package/containerd-bin/containerd.service @@ -15,8 +15,9 @@ ExecStart=/usr/bin/containerd \ $CONTAINERD_MINIKUBE_OPTIONS \ --root ${PERSISTENT_DIR}/var/lib/containerd TasksMax=8192 +# ref: https://github.com/containerd/containerd/blob/main/docs/ops.md#systemd Delegate=yes -KillMode=mixed +KillMode=process LimitNOFILE=1048576 # Having non-zero Limit*s causes performance problems due to accounting overhead # in the kernel. We recommend using cgroups to do container-local accounting. 
diff --git a/deploy/kicbase/containerd.toml b/deploy/kicbase/containerd.toml index 98b902d7a1..12b0d42eb7 100644 --- a/deploy/kicbase/containerd.toml +++ b/deploy/kicbase/containerd.toml @@ -1,7 +1,7 @@ version = 2 -root = "/var/lib/containerd" +root = "/var/lib/containerd" state = "/run/containerd" -oom_score = 0 +oom_score = 0 # imports [grpc] @@ -40,8 +40,7 @@ oom_score = 0 [plugins."io.containerd.grpc.v1.cri".containerd] discard_unpacked_layers = true snapshotter = "overlayfs" - [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime] - runtime_type = "io.containerd.runc.v2" + default_runtime_name = "runc" [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] runtime_type = "" runtime_engine = "" @@ -54,7 +53,7 @@ oom_score = 0 [plugins."io.containerd.grpc.v1.cri".cni] bin_dir = "/opt/cni/bin" - conf_dir = "/etc/cni/net.mk" + conf_dir = "/etc/cni/net.d" conf_template = "" [plugins."io.containerd.grpc.v1.cri".registry] config_path = "/etc/containerd/certs.d" diff --git a/go.mod b/go.mod index 00ce49e5df..f86d7ae596 100644 --- a/go.mod +++ b/go.mod @@ -32,15 +32,6 @@ require ( github.com/hooklift/iso9660 v0.0.0-20170318115843-1cf07e5970d8 github.com/intel-go/cpuid v0.0.0-20181003105527-1a4a6f06a1c6 // indirect github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f - github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c - github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d // indirect - github.com/juju/fslock v0.0.0-20160525022230-4d5c94c67b4b - github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 // indirect - github.com/juju/mutex v0.0.0-20180619145857-d21b13acf4bf - github.com/juju/retry v0.0.0-20180821225755-9058e192b216 // indirect - github.com/juju/testing v0.0.0-20190723135506-ce30eb24acd2 // indirect - github.com/juju/utils v0.0.0-20180820210520-bf9cc5bdd62d // indirect - github.com/juju/version v0.0.0-20180108022336-b64dbd566305 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/klauspost/cpuid v1.2.0 github.com/machine-drivers/docker-machine-driver-vmware v0.1.5 @@ -99,6 +90,9 @@ require ( github.com/docker/cli v20.10.22+incompatible github.com/docker/go-connections v0.4.0 github.com/google/go-github/v43 v43.0.0 + github.com/juju/clock v1.0.2 + github.com/juju/fslock v0.0.0-20160525022230-4d5c94c67b4b + github.com/juju/mutex/v2 v2.0.0 github.com/opencontainers/runc v1.1.4 github.com/santhosh-tekuri/jsonschema/v5 v5.1.1 ) @@ -166,6 +160,7 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/juju/errors v0.0.0-20220203013757-bd733f3c86b9 // indirect github.com/klauspost/compress v1.15.11 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/magiconair/properties v1.8.6 // indirect diff --git a/go.sum b/go.sum index 9bce5a3db9..f2ab1aeb8b 100644 --- a/go.sum +++ b/go.sum @@ -683,24 +683,21 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c h1:3UvYABOQRhJAApj9MdCN+Ydv841ETSoy6xLzdmmr/9A= -github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c/go.mod 
h1:nD0vlnrUjcjJhqN5WuCWZyzfd5AHZAC9/ajvbSx69xA= -github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d h1:hJXjZMxj0SWlMoQkzeZDLi2cmeiWKa7y1B8Rg+qaoEc= -github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= +github.com/juju/clock v1.0.2 h1:dJFdUGjtR/76l6U5WLVVI/B3i6+u3Nb9F9s1m+xxrxo= +github.com/juju/clock v1.0.2/go.mod h1:HIBvJ8kiV/n7UHwKuCkdYL4l/MDECztHR2sAvWDxxf0= +github.com/juju/collections v0.0.0-20200605021417-0d0ec82b7271 h1:4R626WTwa7pRYQFiIRLVPepMhm05eZMEx+wIurRnMLc= +github.com/juju/errors v0.0.0-20220203013757-bd733f3c86b9 h1:EJHbsNpQyupmMeWTq7inn+5L/WZ7JfzCVPJ+DP9McCQ= +github.com/juju/errors v0.0.0-20220203013757-bd733f3c86b9/go.mod h1:TRm7EVGA3mQOqSVcBySRY7a9Y1/gyVhh/WTCnc5sD4U= github.com/juju/fslock v0.0.0-20160525022230-4d5c94c67b4b h1:FQ7+9fxhyp82ks9vAuyPzG0/vVbWwMwLJ+P6yJI5FN8= github.com/juju/fslock v0.0.0-20160525022230-4d5c94c67b4b/go.mod h1:HMcgvsgd0Fjj4XXDkbjdmlbI505rUPBs6WBMYg2pXks= -github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 h1:UUHMLvzt/31azWTN/ifGWef4WUqvXk0iRqdhdy/2uzI= -github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= -github.com/juju/mutex v0.0.0-20180619145857-d21b13acf4bf h1:2d3cilQly1OpAfZcn4QRuwDOdVoHsM4cDTkcKbmO760= -github.com/juju/mutex v0.0.0-20180619145857-d21b13acf4bf/go.mod h1:Y3oOzHH8CQ0Ppt0oCKJ2JFO81/EsWenH5AEqigLH+yY= +github.com/juju/loggo v0.0.0-20210728185423-eebad3a902c4 h1:NO5tuyw++EGLnz56Q8KMyDZRwJwWO8jQnj285J3FOmY= +github.com/juju/mgo/v2 v2.0.0-20210302023703-70d5d206e208 h1:/WiCm+Vpj87e4QWuWwPD/bNE9kDrWCLvPBHOQNcG2+A= +github.com/juju/mutex/v2 v2.0.0 h1:rVmJdOaXGWF8rjcFHBNd4x57/1tks5CgXHx55O55SB0= +github.com/juju/mutex/v2 v2.0.0/go.mod h1:jwCfBs/smYDaeZLqeaCi8CB8M+tOes4yf827HoOEoqk= github.com/juju/retry v0.0.0-20180821225755-9058e192b216 h1:/eQL7EJQKFHByJe3DeE8Z36yqManj9UY5zppDoQi4FU= -github.com/juju/retry v0.0.0-20180821225755-9058e192b216/go.mod h1:OohPQGsr4pnxwD5YljhQ+TZnuVRYpa5irjugL1Yuif4= -github.com/juju/testing v0.0.0-20190723135506-ce30eb24acd2 h1:Pp8RxiF4rSoXP9SED26WCfNB28/dwTDpPXS8XMJR8rc= -github.com/juju/testing v0.0.0-20190723135506-ce30eb24acd2/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= -github.com/juju/utils v0.0.0-20180820210520-bf9cc5bdd62d h1:irPlN9z5VCe6BTsqVsxheCZH99OFSmqSVyTigW4mEoY= -github.com/juju/utils v0.0.0-20180820210520-bf9cc5bdd62d/go.mod h1:6/KLg8Wz/y2KVGWEpkK9vMNGkOnu4k/cqs8Z1fKjTOk= -github.com/juju/version v0.0.0-20180108022336-b64dbd566305 h1:lQxPJ1URr2fjsKnJRt/BxiIxjLt9IKGvS+0injMHbag= -github.com/juju/version v0.0.0-20180108022336-b64dbd566305/go.mod h1:kE8gK5X0CImdr7qpSKl3xB2PmpySSmfj7zVbkZFs81U= +github.com/juju/testing v0.0.0-20220203020004-a0ff61f03494 h1:XEDzpuZb8Ma7vLja3+5hzUqVTvAqm5Y+ygvnDs5iTMM= +github.com/juju/utils/v3 v3.0.0-20220130232349-cd7ecef0e94a h1:5ZWDCeCF0RaITrZGemzmDFIhjR/MVSvBUqgSyaeTMbE= +github.com/juju/version/v2 v2.0.0-20211007103408-2e8da085dc23 h1:wtEPbidt1VyHlb8RSztU6ySQj29FLsOQiI9XiJhXDM4= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= @@ -1688,8 +1685,6 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= 
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw= -gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= diff --git a/hack/preload-images/generate.go b/hack/preload-images/generate.go index 0744d3fa1f..92e455e1ea 100644 --- a/hack/preload-images/generate.go +++ b/hack/preload-images/generate.go @@ -31,6 +31,7 @@ import ( "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/cruntime" + "k8s.io/minikube/pkg/minikube/detect" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/sysinit" "k8s.io/minikube/pkg/util" @@ -93,7 +94,8 @@ func generateTarball(kubernetesVersion, containerRuntime, tarballFilename string if err != nil { return errors.Wrap(err, "failed create new runtime") } - if err := cr.Enable(true, false, false); err != nil { + + if err := cr.Enable(true, detect.CgroupDriver(), false); err != nil { return errors.Wrap(err, "enable container runtime") } diff --git a/pkg/drivers/kic/oci/network_create.go b/pkg/drivers/kic/oci/network_create.go index aad123867c..674300d45e 100644 --- a/pkg/drivers/kic/oci/network_create.go +++ b/pkg/drivers/kic/oci/network_create.go @@ -145,6 +145,7 @@ func tryCreateDockerNetwork(ociBin string, subnet *network.Parameters, mtu int, rr, err := runCmd(exec.Command(ociBin, args...)) if err != nil { + klog.Errorf("failed to create %s network %s %s with gateway %s and mtu of %d: %v", ociBin, name, subnet.CIDR, subnet.Gateway, mtu, err) // Pool overlaps with other one on this address space if strings.Contains(rr.Output(), "Pool overlaps") { return nil, ErrNetworkSubnetTaken diff --git a/pkg/kapi/kapi.go b/pkg/kapi/kapi.go index 2925751c62..824de949cf 100644 --- a/pkg/kapi/kapi.go +++ b/pkg/kapi/kapi.go @@ -97,7 +97,6 @@ func WaitForPods(c kubernetes.Interface, ns string, selector string, timeOut ... 
return false, nil } } - return true, nil } t := ReasonableStartTime @@ -223,14 +222,19 @@ func ScaleDeployment(kcontext, namespace, deploymentName string, replicas int) e err = wait.PollImmediate(kconst.APICallRetryInterval, ReasonableMutateTime, func() (bool, error) { scale, err := client.AppsV1().Deployments(namespace).GetScale(context.Background(), deploymentName, meta.GetOptions{}) if err != nil { - klog.Warningf("failed getting deployment scale, will retry: %v", err) + if !IsRetryableAPIError(err) { + return false, fmt.Errorf("non-retryable failure while getting %q deployment scale: %v", deploymentName, err) + } + klog.Warningf("failed getting %q deployment scale, will retry: %v", deploymentName, err) return false, nil } if scale.Spec.Replicas != int32(replicas) { scale.Spec.Replicas = int32(replicas) - _, err = client.AppsV1().Deployments(namespace).UpdateScale(context.Background(), deploymentName, scale, meta.UpdateOptions{}) - if err != nil { - klog.Warningf("failed rescaling deployment, will retry: %v", err) + if _, err = client.AppsV1().Deployments(namespace).UpdateScale(context.Background(), deploymentName, scale, meta.UpdateOptions{}); err != nil { + if !IsRetryableAPIError(err) { + return false, fmt.Errorf("non-retryable failure while rescaling %s deployment: %v", deploymentName, err) + } + klog.Warningf("failed rescaling %s deployment, will retry: %v", deploymentName, err) } // repeat (if change was successful - once again to check & confirm requested scale) return false, nil @@ -238,10 +242,10 @@ func ScaleDeployment(kcontext, namespace, deploymentName string, replicas int) e return true, nil }) if err != nil { - klog.Infof("timed out trying to rescale deployment %q in namespace %q and context %q to %d: %v", deploymentName, namespace, kcontext, replicas, err) + klog.Warningf("failed rescaling %q deployment in %q namespace and %q context to %d replicas: %v", deploymentName, namespace, kcontext, replicas, err) return err } - klog.Infof("deployment %q in namespace %q and context %q rescaled to %d", deploymentName, namespace, kcontext, replicas) + klog.Infof("%q deployment in %q namespace and %q context rescaled to %d replicas", deploymentName, namespace, kcontext, replicas) return nil } diff --git a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta1.go b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta1.go index 8161e47b7b..3e0d021f72 100644 --- a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta1.go +++ b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta1.go @@ -77,6 +77,9 @@ authentication: x509: clientCAFile: {{.ClientCAFile}} cgroupDriver: {{.CgroupDriver}} +{{- range $key, $val := .KubeletConfigOpts}} +{{$key}}: {{$val}} +{{- end}} clusterDomain: "{{if .DNSDomain}}{{.DNSDomain}}{{else}}cluster.local{{end}}" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go index ce6970ff28..fc098623cf 100644 --- a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go +++ b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go @@ -80,6 +80,9 @@ authentication: x509: clientCAFile: {{.ClientCAFile}} cgroupDriver: {{.CgroupDriver}} +{{- range $key, $val := .KubeletConfigOpts}} +{{$key}}: {{$val}} +{{- end}} clusterDomain: "{{if .DNSDomain}}{{.DNSDomain}}{{else}}cluster.local{{end}}" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 
112a395de6..780a573dde 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -68,6 +68,13 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana } return nil, errors.Wrap(err, "getting cgroup driver") } + // TODO: investigate why containerd (v1.6.15) does not work with k8s (v1.25.3) when both are set to use systemd cgroup driver + // issue: https://github.com/kubernetes/minikube/issues/15633 + // until this is fixed, the workaround is to configure kubelet to use cgroupfs when containerd is using systemd + // note: pkg/minikube/bootstrapper/bsutil/kubeadm_test.go::TestGenerateKubeadmYAML also expects this override (for now) + if cc.KubernetesConfig.ContainerRuntime == constants.Containerd && cgroupDriver == constants.SystemdCgroupDriver { + cgroupDriver = constants.CgroupfsCgroupDriver + } componentOpts, err := createExtraComponentConfig(k8s.ExtraOptions, version, componentFeatureArgs, cp) if err != nil { @@ -86,6 +93,20 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana } klog.Infof("Using pod CIDR: %s", podCIDR) + // ref: https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration + kubeletConfigOpts := kubeletConfigOpts(k8s.ExtraOptions) + // set hairpin mode to hairpin-veth to achieve hairpin NAT, because promiscuous-bridge assumes the existence of a container bridge named cbr0 + // ref: https://kubernetes.io/docs/tasks/debug/debug-application/debug-service/#a-pod-fails-to-reach-itself-via-the-service-ip + kubeletConfigOpts["hairpinMode"] = k8s.ExtraOptions.Get("hairpin-mode", Kubelet) + if kubeletConfigOpts["hairpinMode"] == "" { + kubeletConfigOpts["hairpinMode"] = "hairpin-veth" + } + // set timeout for all runtime requests except long running requests - pull, logs, exec and attach + kubeletConfigOpts["runtimeRequestTimeout"] = k8s.ExtraOptions.Get("runtime-request-timeout", Kubelet) + if kubeletConfigOpts["runtimeRequestTimeout"] == "" { + kubeletConfigOpts["runtimeRequestTimeout"] = "15m" + } + opts := struct { CertDir string ServiceCIDR string @@ -134,7 +155,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana ControlPlaneAddress: constants.ControlPlaneAlias, KubeProxyOptions: createKubeProxyOptions(k8s.ExtraOptions), ResolvConfSearchRegression: HasResolvConfSearchRegression(k8s.KubernetesVersion), - KubeletConfigOpts: kubeletConfigOpts(k8s.ExtraOptions), + KubeletConfigOpts: kubeletConfigOpts, } if k8s.ServiceCIDR != "" { diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go index 8a9548b3b3..13dc81d807 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go @@ -128,7 +128,7 @@ This test case has only 1 thing to test and that is the networking/dnsDomain value */ func TestGenerateKubeadmYAMLDNS(t *testing.T) { - versions, err := recentReleases(0) + versions, err := recentReleases(6) if err != nil { t.Errorf("versions: %v", err) } @@ -212,7 +212,7 @@ func TestGenerateKubeadmYAML(t *testing.T) { fcr.SetCommandToOutput(map[string]string{ "docker info --format {{.CgroupDriver}}": "systemd\n", "crio config": "cgroup_manager = \"systemd\"\n", - "sudo crictl info": "{\"config\": {\"systemdCgroup\": true}}", + "sudo crictl info": "{\"config\": {\"containerd\": {\"runtimes\": {\"runc\": {\"options\": {\"SystemdCgroup\": true}}}}}}", }) tests := []struct { name
string @@ -221,13 +221,13 @@ func TestGenerateKubeadmYAML(t *testing.T) { cfg config.ClusterConfig }{ {"default", "docker", false, config.ClusterConfig{Name: "mk"}}, - {"containerd", "containerd", false, config.ClusterConfig{Name: "mk"}}, + {"containerd", "containerd", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ContainerRuntime: constants.Containerd}}}, {"crio", "crio", false, config.ClusterConfig{Name: "mk"}}, {"options", "docker", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts}}}, {"crio-options-gates", "crio", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts, FeatureGates: "a=b"}}}, {"unknown-component", "docker", true, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: config.ExtraOptionSlice{config.ExtraOption{Component: "not-a-real-component", Key: "killswitch", Value: "true"}}}}}, - {"containerd-api-port", "containerd", false, config.ClusterConfig{Name: "mk", Nodes: []config.Node{{Port: 12345}}}}, - {"containerd-pod-network-cidr", "containerd", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOptsPodCidr}}}, + {"containerd-api-port", "containerd", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ContainerRuntime: constants.Containerd}, Nodes: []config.Node{{Port: 12345}}}}, + {"containerd-pod-network-cidr", "containerd", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ContainerRuntime: constants.Containerd, ExtraOptions: extraOptsPodCidr}}}, {"image-repository", "docker", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ImageRepository: "test/repo"}}}, } for _, version := range versions { diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet.go b/pkg/minikube/bootstrapper/bsutil/kubelet.go index b358d06300..1b6019d89b 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet.go @@ -37,8 +37,13 @@ import ( ) // kubeletConfigParams are the only allowed kubelet parameters for kubeadmin config file and not to be used as kubelet flags +// ref: https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/ - look for "DEPRECATED" flags +// ref: https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ +// ref: https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration var kubeletConfigParams = []string{ "localStorageCapacityIsolation", + "runtime-request-timeout", + "hairpin-mode", } func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manager) (map[string]string, error) { diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go index 8094583a5d..390cbee65f 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go @@ -80,7 +80,7 @@ Wants=crio.service [Service] ExecStart= -ExecStart=/var/lib/minikube/binaries/v1.18.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube 
--image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m +ExecStart=/var/lib/minikube/binaries/v1.18.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests [Install] `, @@ -106,7 +106,7 @@ Wants=containerd.service [Service] ExecStart= -ExecStart=/var/lib/minikube/binaries/v1.18.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m +ExecStart=/var/lib/minikube/binaries/v1.18.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests [Install] `, @@ -139,7 +139,7 @@ Wants=containerd.service [Service] ExecStart= -ExecStart=/var/lib/minikube/binaries/v1.18.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m +ExecStart=/var/lib/minikube/binaries/v1.18.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests [Install] `, diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/api_server.go 
b/pkg/minikube/bootstrapper/bsutil/kverify/api_server.go index 7b40227e3f..2af2f19136 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/api_server.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/api_server.go @@ -149,7 +149,7 @@ func APIServerVersionMatch(client *kubernetes.Clientset, expected string) error // by container runtime restart for example and there is a gap before it comes back func WaitForAPIServerStatus(cr command.Runner, to time.Duration, hostname string, port int) (state.State, error) { var st state.State - err := wait.PollImmediate(200*time.Millisecond, to, func() (bool, error) { + err := wait.PollImmediate(500*time.Millisecond, to, func() (bool, error) { var err error st, err = APIServerStatus(cr, hostname, port) if st == state.Stopped { diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml index c946a53272..fb571e6a3f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml @@ -51,7 +51,9 @@ kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml index e1f0f3f325..3a5b21779c 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml @@ -51,7 +51,9 @@ kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml index 9e1fd6d74b..c7f58af531 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml @@ -51,7 +51,9 @@ kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml index 3663007278..3ea843675f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml @@ -58,6 +58,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml index bc7a440b6c..2e1de3b47c 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml @@ -52,6 +52,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml index dc484b5891..ca5d6dc6f4 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml @@ -52,6 +52,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml index 7891a251e1..8633fac685 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml @@ -52,6 +52,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "minikube.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml index 39d7d628f3..79ad87073c 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml @@ -53,6 +53,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml index be5e8249eb..afa2f1a703 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml @@ -55,6 +55,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/containerd-api-port.yaml index 7f270f6b7b..a14004cea0 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/containerd-api-port.yaml @@ -51,7 +51,9 @@ kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/containerd-pod-network-cidr.yaml index 2689cfe746..ae78c83e2a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/containerd-pod-network-cidr.yaml @@ -51,7 +51,9 @@ kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/containerd.yaml index 3f4a81576a..08daa24180 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/containerd.yaml @@ -51,7 +51,9 @@ kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/crio-options-gates.yaml index 42f5ce1e6a..a40b5c04e0 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/crio-options-gates.yaml @@ -58,6 +58,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/crio.yaml index 7e15a5b789..2ca61302aa 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/crio.yaml @@ -52,6 +52,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/default.yaml index 2ee50bb1c0..862db516e4 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/default.yaml @@ -52,6 +52,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/dns.yaml index 19bac3f2e7..e23562baa0 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/dns.yaml @@ -52,6 +52,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth 
+runtimeRequestTimeout: 15m clusterDomain: "minikube.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/image-repository.yaml index d3b14db355..f36591d1ed 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/image-repository.yaml @@ -53,6 +53,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/options.yaml index 41951675a9..74bbe84b11 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.20/options.yaml @@ -55,6 +55,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/containerd-api-port.yaml index ca06be4676..09b0971170 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/containerd-api-port.yaml @@ -51,7 +51,9 @@ kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/containerd-pod-network-cidr.yaml index 49d1d4c253..057a947793 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/containerd-pod-network-cidr.yaml @@ -51,7 +51,9 @@ kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/containerd.yaml index 20c133ade3..34c8142fd6 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/containerd.yaml @@ -51,7 +51,9 @@ kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/crio-options-gates.yaml index 90cbcd5a64..71258a5bcd 
100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/crio-options-gates.yaml @@ -58,6 +58,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/crio.yaml index 3fb371631c..3f2f1ce536 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/crio.yaml @@ -52,6 +52,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/default.yaml index 63dda5e2d1..7d97ab09f6 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/default.yaml @@ -52,6 +52,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/dns.yaml index 486cb139a3..6dd304998d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/dns.yaml @@ -52,6 +52,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "minikube.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/image-repository.yaml index 41171d19e3..b60d5647a6 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/image-repository.yaml @@ -53,6 +53,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/options.yaml index 0ae6233719..c7e3535316 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.21/options.yaml @@ -55,6 +55,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/containerd-api-port.yaml index a6f66ca3fc..03378418da 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/containerd-api-port.yaml @@ -51,7 +51,9 @@ kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/containerd-pod-network-cidr.yaml index be1449f67c..fc0fde88d5 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/containerd-pod-network-cidr.yaml @@ -51,7 +51,9 @@ kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/containerd.yaml index 17add873e8..cfbdaed96e 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/containerd.yaml @@ -51,7 +51,9 @@ kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/crio-options-gates.yaml index 675695d35c..f24e17f7ce 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/crio-options-gates.yaml @@ -58,6 +58,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/crio.yaml index c4cdca797b..c7b569de1a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/crio.yaml @@ -52,6 +52,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/default.yaml index 41c52cead9..95cc1f7b35 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/default.yaml @@ -52,6 +52,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource 
management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/dns.yaml index 6ad1aece47..7ea1313b05 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/dns.yaml @@ -52,6 +52,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "minikube.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/image-repository.yaml index 6eea4cd782..d85c27dbca 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/image-repository.yaml @@ -53,6 +53,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/options.yaml index 0869698959..75e8edf30a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.22/options.yaml @@ -55,6 +55,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/containerd-api-port.yaml index 8ba73ba262..baa211c625 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/containerd-api-port.yaml @@ -49,7 +49,9 @@ kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/containerd-pod-network-cidr.yaml index 329588b9a1..a0604056ec 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/containerd-pod-network-cidr.yaml @@ -49,7 +49,9 @@ kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/containerd.yaml index b9598d7fe8..4ca004be58 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/containerd.yaml @@ -49,7 +49,9 @@ kind: 
KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/crio-options-gates.yaml index 6b220ff965..8f06d0c9bf 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/crio-options-gates.yaml @@ -56,6 +56,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/crio.yaml index f986859773..8e95541b6c 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/crio.yaml @@ -50,6 +50,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/default.yaml index 05a6d75980..5c4ee61fbd 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/default.yaml @@ -50,6 +50,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/dns.yaml index 4343f08828..3387cb9a3d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/dns.yaml @@ -50,6 +50,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "minikube.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/image-repository.yaml index 25b3e44885..df980fb545 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/image-repository.yaml @@ -51,6 +51,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/options.yaml index b44de0a03c..b608822891 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.23/options.yaml 
@@ -53,6 +53,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/containerd-api-port.yaml index c0d6a2e107..070746c3f5 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/containerd-api-port.yaml @@ -49,7 +49,9 @@ kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/containerd-pod-network-cidr.yaml index ad0f6e2d5f..d984ac1ae0 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/containerd-pod-network-cidr.yaml @@ -49,7 +49,9 @@ kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/containerd.yaml index b57437de99..6bd3c29cff 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/containerd.yaml @@ -49,7 +49,9 @@ kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/crio-options-gates.yaml index c071093bf1..d11d3e77a8 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/crio-options-gates.yaml @@ -56,6 +56,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/crio.yaml index e610321bef..bb4fe9c470 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/crio.yaml @@ -50,6 +50,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/default.yaml index a7d163f7ae..357b86a2e1 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/default.yaml @@ -50,6 +50,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/dns.yaml index f304a30dd2..b589145b10 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/dns.yaml @@ -50,6 +50,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "minikube.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/image-repository.yaml index 232c8785ab..660efe8242 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/image-repository.yaml @@ -51,6 +51,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/options.yaml index 71046482a4..1e824985e0 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.24/options.yaml @@ -53,6 +53,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/containerd-api-port.yaml index 9dd0cf7182..7d8426cec9 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/containerd-api-port.yaml @@ -49,7 +49,9 @@ kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/containerd-pod-network-cidr.yaml index 0852428188..51898a2f39 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/containerd-pod-network-cidr.yaml @@ -49,7 +49,9 @@ kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs 
+hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/containerd.yaml index 7b29b31b96..9a4ebdf597 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/containerd.yaml @@ -49,7 +49,9 @@ kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt -cgroupDriver: systemd +cgroupDriver: cgroupfs +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/crio-options-gates.yaml index f30daf411f..a7c48c4492 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/crio-options-gates.yaml @@ -56,6 +56,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/crio.yaml index f1446b644a..7915ee5350 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/crio.yaml @@ -50,6 +50,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/default.yaml index c03e0657c0..b3463c08f7 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/default.yaml @@ -50,6 +50,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/dns.yaml index d8c7e3ac99..84ca57799c 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/dns.yaml @@ -50,6 +50,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "minikube.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/image-repository.yaml index 327ecc65d7..b6fe947d09 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/image-repository.yaml @@ -51,6 +51,8 @@ authentication: x509: clientCAFile: 
/var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/options.yaml index e215c9f955..e75fc3e18b 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.25/options.yaml @@ -53,6 +53,8 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +hairpinMode: hairpin-veth +runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100
diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go
index e86641ee04..8b4b11b763 100644
--- a/pkg/minikube/bootstrapper/certs.go
+++ b/pkg/minikube/bootstrapper/certs.go
@@ -30,6 +30,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/juju/mutex/v2"
 	"github.com/otiai10/copy"
 	"github.com/pkg/errors"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -46,6 +47,7 @@ import (
 	"k8s.io/minikube/pkg/minikube/out"
 	"k8s.io/minikube/pkg/minikube/vmpath"
 	"k8s.io/minikube/pkg/util"
+	"k8s.io/minikube/pkg/util/lock"
 )
 
 // SetupCerts gets the generated credentials required to talk to the APIServer.
@@ -177,6 +179,17 @@ func generateSharedCACerts() (CACerts, bool, error) {
 		},
 	}
 
+	// create a lock for "ca-certs" to avoid race condition over multiple minikube instances rewriting shared ca certs
+	hold := filepath.Join(globalPath, "ca-certs")
+	spec := lock.PathMutexSpec(hold)
+	spec.Timeout = 1 * time.Minute
+	klog.Infof("acquiring lock for shared ca certs: %+v", spec)
+	releaser, err := mutex.Acquire(spec)
+	if err != nil {
+		return cc, false, errors.Wrapf(err, "unable to acquire lock for shared ca certs %+v", spec)
+	}
+	defer releaser.Release()
+
 	for _, ca := range caCertSpecs {
 		if isValid(ca.certPath, ca.keyPath) {
 			klog.Infof("skipping %s CA generation: %s", ca.subject, ca.keyPath)
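The certs.go hunk above serializes shared CA generation across concurrent minikube processes: `github.com/juju/mutex/v2` blocks in `Acquire`, retrying every `Delay`, until the named machine-wide mutex is free or `Timeout` expires. A minimal standalone sketch of the same pattern (not minikube's actual helper — `lock.PathMutexSpec` derives the spec from the lock path, and the name, delay, and callback below are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/juju/clock"
	"github.com/juju/mutex/v2"
)

// withSharedLock runs fn while holding a machine-wide named mutex, the same
// pattern generateSharedCACerts now uses.
func withSharedLock(name string, fn func() error) error {
	spec := mutex.Spec{
		Name:    name,                   // short identifier; letters, digits, dashes
		Clock:   clock.WallClock,        // wall clock for retry/timeout bookkeeping
		Delay:   100 * time.Millisecond, // retry interval while another process holds the lock
		Timeout: 1 * time.Minute,        // give up after a minute, as the hunk above does
	}
	releaser, err := mutex.Acquire(spec) // blocks until acquired or timed out
	if err != nil {
		return fmt.Errorf("unable to acquire lock %q: %w", name, err)
	}
	defer releaser.Release()
	return fn()
}

func main() {
	_ = withSharedLock("minikube-ca-certs", func() error {
		fmt.Println("safe to (re)generate shared CA certs here")
		return nil
	})
}
```

Because the mutex is machine-wide, two `minikube start` invocations can no longer interleave inside `generateSharedCACerts` and overwrite each other's CA files.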
diff --git a/pkg/minikube/bootstrapper/images/images.go b/pkg/minikube/bootstrapper/images/images.go
index 4237d4cac3..72299e80df 100644
--- a/pkg/minikube/bootstrapper/images/images.go
+++ b/pkg/minikube/bootstrapper/images/images.go
@@ -174,7 +174,7 @@ func KindNet(repo string) string {
 }
 
 // all calico images are from https://docs.projectcalico.org/manifests/calico.yaml
-const calicoVersion = "v3.20.0"
+const calicoVersion = "v3.24.5"
 const calicoRepo = "docker.io/calico"
 
 // CalicoDaemonSet returns the image used for calicoDaemonSet
@@ -188,11 +188,6 @@ func CalicoDeployment(repo string) string {
 	return calicoCommon(repo, "kube-controllers")
 }
 
-// CalicoFelixDriver returns image used for felix driver
-func CalicoFelixDriver(repo string) string {
-	return calicoCommon(repo, "pod2daemon-flexvol")
-}
-
 // CalicoBin returns image used for calico binary image
 func CalicoBin(repo string) string {
 	return calicoCommon(repo, "cni")
diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go
index 064d68a520..0af629784d 100644
--- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go
+++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go
@@ -601,7 +601,8 @@ func (k *Bootstrapper) needsReconfigure(conf string, hostname string, port int,
 	}
 
 	// cruntime.Enable() may restart kube-apiserver but does not wait for it to return back
-	apiStatusTimeout := 3000 * time.Millisecond
+	// the restart can take about five seconds, so 10 seconds should be enough for the apiserver to come back up
+	apiStatusTimeout := 10 * time.Second
 	st, err := kverify.WaitForAPIServerStatus(k.c, apiStatusTimeout, hostname, port)
 	if err != nil {
 		klog.Infof("needs reconfigure: apiserver error: %v", err)
@@ -1117,7 +1118,7 @@ func (k *Bootstrapper) elevateKubeSystemPrivileges(cfg config.ClusterConfig) err
 
 // stopKubeSystem stops all the containers in the kube-system to prevent #8740 when doing hot upgrade
 func (k *Bootstrapper) stopKubeSystem(cfg config.ClusterConfig) error {
 	klog.Info("stopping kube-system containers ...")
-	cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c})
+	cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Socket: cfg.KubernetesConfig.CRISocket, Runner: k.c})
 	if err != nil {
 		return errors.Wrap(err, "new cruntime")
 	}
diff --git a/pkg/minikube/cni/bridge.go b/pkg/minikube/cni/bridge.go
index eb736abe2d..48a0acb6fe 100644
--- a/pkg/minikube/cni/bridge.go
+++ b/pkg/minikube/cni/bridge.go
@@ -28,8 +28,11 @@ import (
 )
 
 // bridge is what minikube defaulted to when `--enable-default-cni=true`
-// https://github.com/containernetworking/plugins/blob/master/plugins/main/bridge/README.md
+// ref: https://www.cni.dev/plugins/current/main/bridge/
+// ref: https://www.cni.dev/plugins/current/meta/portmap/
+// note: "cannot set hairpin mode and promiscuous mode at the same time"
+// ref: https://github.com/containernetworking/plugins/blob/7e9ada51e751740541969e1ea5a803cbf45adcf3/plugins/main/bridge/bridge.go#L424
 var bridgeConf = template.Must(template.New("bridge").Parse(`
 {
   "cniVersion": "0.3.1",
diff --git a/pkg/minikube/cni/calico.go b/pkg/minikube/cni/calico.go
index 9db610400b..6f7b55c4b7 100644
--- a/pkg/minikube/cni/calico.go
+++ b/pkg/minikube/cni/calico.go
@@ -46,9 +46,9 @@ type Calico struct {
 }
 
 type calicoTmplStruct struct {
+	PodCIDR                   string
 	DeploymentImageName       string
 	DaemonSetImageName        string
-	FelixDriverImageName      string
 	BinaryImageName           string
 	LegacyPodDisruptionBudget bool
 }
@@ -66,9 +66,9 @@ func (c Calico) manifest() (assets.CopyableFile, error) {
 	}
 
 	input := &calicoTmplStruct{
+		PodCIDR:                   DefaultPodCIDR,
 		DeploymentImageName:       images.CalicoDeployment(c.cc.KubernetesConfig.ImageRepository),
 		DaemonSetImageName:        images.CalicoDaemonSet(c.cc.KubernetesConfig.ImageRepository),
-		FelixDriverImageName:      images.CalicoFelixDriver(c.cc.KubernetesConfig.ImageRepository),
 		BinaryImageName:           images.CalicoBin(c.cc.KubernetesConfig.ImageRepository),
 		LegacyPodDisruptionBudget: k8sVersion.LT(semver.Version{Major: 1, Minor: 25}),
 	}
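For context on the calico.go hunk above: the calico.yaml manifest (next hunk) is a Go `text/template`, so the fields of `calicoTmplStruct` are substituted directly into it, and the new `LegacyPodDisruptionBudget` flag appends `beta1` to the PodDisruptionBudget apiVersion on clusters below Kubernetes 1.25. A minimal sketch of that mechanism, using a reduced struct and treating `10.244.0.0/16` as a stand-in for minikube's `DefaultPodCIDR`:

```go
package main

import (
	"os"
	"text/template"
)

// calicoInput mirrors, in reduced form, the fields of calicoTmplStruct.
type calicoInput struct {
	PodCIDR                   string
	LegacyPodDisruptionBudget bool
}

// pdbHeader is the first stanza of the manifest below, verbatim.
const pdbHeader = `apiVersion: policy/v1{{if .LegacyPodDisruptionBudget}}beta1{{end}}
kind: PodDisruptionBudget
`

func main() {
	tmpl := template.Must(template.New("calico").Parse(pdbHeader))

	// Kubernetes >= 1.25: renders "apiVersion: policy/v1".
	_ = tmpl.Execute(os.Stdout, calicoInput{PodCIDR: "10.244.0.0/16", LegacyPodDisruptionBudget: false})

	// Kubernetes < 1.25: renders "apiVersion: policy/v1beta1".
	_ = tmpl.Execute(os.Stdout, calicoInput{PodCIDR: "10.244.0.0/16", LegacyPodDisruptionBudget: true})
}
```

The full template additionally substitutes `.PodCIDR` and the image names resolved in images.go into the manifest that follows.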
diff --git a/pkg/minikube/cni/calico.yaml b/pkg/minikube/cni/calico.yaml index c357997113..4b344849b8 100644 --- a/pkg/minikube/cni/calico.yaml +++ b/pkg/minikube/cni/calico.yaml @@ -1,4 +1,34 @@ --- +# Source: calico/templates/calico-kube-controllers.yaml +# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict + +apiVersion: policy/v1{{if .LegacyPodDisruptionBudget}}beta1{{end}} +kind: PodDisruptionBudget +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers +--- +# Source: calico/templates/calico-kube-controllers.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system +--- +# Source: calico/templates/calico-node.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: +
name: calico-node + namespace: kube-system +--- # Source: calico/templates/calico-config.yaml # This ConfigMap is used to configure a self-hosted Calico installation. kind: ConfigMap @@ -52,10 +82,8 @@ data: } ] } - --- # Source: calico/templates/kdd-crds.yaml - apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -67,6 +95,7 @@ spec: listKind: BGPConfigurationList plural: bgpconfigurations singular: bgpconfiguration + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -94,6 +123,12 @@ spec: 64512]' format: int32 type: integer + bindMode: + description: BindMode indicates whether to listen for BGP connections + on all addresses (None) or only on the node's canonical IP address + Node.Spec.BGP.IPvXAddress (NodeIP). Default behaviour is to listen + for BGP connections on all addresses. + type: string communities: description: Communities is a list of BGP community values and their arbitrary names for tagging routes. @@ -124,6 +159,37 @@ spec: description: 'LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: INFO]' type: string + nodeMeshMaxRestartTime: + description: Time to allow for software restart for node-to-mesh peerings. When + specified, this is configured as the graceful restart timeout. When + not specified, the BIRD default of 120s is used. This field can + only be set on the default BGPConfiguration instance and requires + that NodeMesh is enabled + type: string + nodeMeshPassword: + description: Optional BGP password for full node-to-mesh peerings. + This field can only be set on the default BGPConfiguration instance + and requires that NodeMesh is enabled + properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object nodeToNodeMeshEnabled: description: 'NodeToNodeMeshEnabled sets whether full node to node BGP mesh is enabled. [Default: true]' @@ -197,8 +263,8 @@ status: plural: "" conditions: [] storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -210,6 +276,7 @@ spec: listKind: BGPPeerList plural: bgppeers singular: bgppeer + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -242,8 +309,8 @@ spec: in the specific branch of the Node on "bird.cfg". type: boolean maxRestartTime: - description: Time to allow for software restart. When specified, this - is configured as the graceful restart timeout. When not specified, + description: Time to allow for software restart. When specified, + this is configured as the graceful restart timeout. When not specified, the BIRD default of 120s is used. type: string node: @@ -255,6 +322,12 @@ spec: description: Selector for the nodes that should have this peering. When this is set, the Node field must be empty. type: string + numAllowedLocalASNumbers: + description: Maximum number of local AS numbers that are allowed in + the AS path for received routes. This removes BGP loop prevention + and should only be used if absolutely necesssary.
+ format: int32 + type: integer password: description: Optional BGP password for the peerings generated by this BGPPeer resource. @@ -310,8 +383,8 @@ status: plural: "" conditions: [] storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -323,6 +396,7 @@ spec: listKind: BlockAffinityList plural: blockaffinities singular: blockaffinity + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -371,8 +445,272 @@ status: plural: "" conditions: [] storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: caliconodestatuses.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: CalicoNodeStatus + listKind: CalicoNodeStatusList + plural: caliconodestatuses + singular: caliconodestatus + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CalicoNodeStatusSpec contains the specification for a CalicoNodeStatus + resource. + properties: + classes: + description: Classes declares the types of information to monitor + for this calico/node, and allows for selective status reporting + about certain subsets of information. + items: + type: string + type: array + node: + description: The node name identifies the Calico node instance for + node status. + type: string + updatePeriodSeconds: + description: UpdatePeriodSeconds is the period at which CalicoNodeStatus + should be updated. Set to 0 to disable CalicoNodeStatus refresh. + Maximum update period is one day. + format: int32 + type: integer + type: object + status: + description: CalicoNodeStatusStatus defines the observed state of CalicoNodeStatus. + No validation needed for status since it is updated by Calico. + properties: + agent: + description: Agent holds agent status on the node. + properties: + birdV4: + description: BIRDV4 represents the latest observed status of bird4. + properties: + lastBootTime: + description: LastBootTime holds the value of lastBootTime + from bird.ctl output. + type: string + lastReconfigurationTime: + description: LastReconfigurationTime holds the value of lastReconfigTime + from bird.ctl output. + type: string + routerID: + description: Router ID used by bird. + type: string + state: + description: The state of the BGP Daemon. + type: string + version: + description: Version of the BGP daemon + type: string + type: object + birdV6: + description: BIRDV6 represents the latest observed status of bird6. + properties: + lastBootTime: + description: LastBootTime holds the value of lastBootTime + from bird.ctl output. 
+ type: string + lastReconfigurationTime: + description: LastReconfigurationTime holds the value of lastReconfigTime + from bird.ctl output. + type: string + routerID: + description: Router ID used by bird. + type: string + state: + description: The state of the BGP Daemon. + type: string + version: + description: Version of the BGP daemon + type: string + type: object + type: object + bgp: + description: BGP holds node BGP status. + properties: + numberEstablishedV4: + description: The total number of IPv4 established bgp sessions. + type: integer + numberEstablishedV6: + description: The total number of IPv6 established bgp sessions. + type: integer + numberNotEstablishedV4: + description: The total number of IPv4 non-established bgp sessions. + type: integer + numberNotEstablishedV6: + description: The total number of IPv6 non-established bgp sessions. + type: integer + peersV4: + description: PeersV4 represents IPv4 BGP peers status on the node. + items: + description: CalicoNodePeer contains the status of BGP peers + on the node. + properties: + peerIP: + description: IP address of the peer whose condition we are + reporting. + type: string + since: + description: Since the state or reason last changed. + type: string + state: + description: State is the BGP session state. + type: string + type: + description: Type indicates whether this peer is configured + via the node-to-node mesh, or via en explicit global or + per-node BGPPeer object. + type: string + type: object + type: array + peersV6: + description: PeersV6 represents IPv6 BGP peers status on the node. + items: + description: CalicoNodePeer contains the status of BGP peers + on the node. + properties: + peerIP: + description: IP address of the peer whose condition we are + reporting. + type: string + since: + description: Since the state or reason last changed. + type: string + state: + description: State is the BGP session state. + type: string + type: + description: Type indicates whether this peer is configured + via the node-to-node mesh, or via en explicit global or + per-node BGPPeer object. + type: string + type: object + type: array + required: + - numberEstablishedV4 + - numberEstablishedV6 + - numberNotEstablishedV4 + - numberNotEstablishedV6 + type: object + lastUpdated: + description: LastUpdated is a timestamp representing the server time + when CalicoNodeStatus object last updated. It is represented in + RFC3339 form and is in UTC. + format: date-time + nullable: true + type: string + routes: + description: Routes reports routes known to the Calico BGP daemon + on the node. + properties: + routesV4: + description: RoutesV4 represents IPv4 routes on the node. + items: + description: CalicoNodeRoute contains the status of BGP routes + on the node. + properties: + destination: + description: Destination of the route. + type: string + gateway: + description: Gateway for the destination. + type: string + interface: + description: Interface for the destination + type: string + learnedFrom: + description: LearnedFrom contains information regarding + where this route originated. + properties: + peerIP: + description: If sourceType is NodeMesh or BGPPeer, IP + address of the router that sent us this route. + type: string + sourceType: + description: Type of the source where a route is learned + from. + type: string + type: object + type: + description: Type indicates if the route is being used for + forwarding or not. 
+ type: string + type: object + type: array + routesV6: + description: RoutesV6 represents IPv6 routes on the node. + items: + description: CalicoNodeRoute contains the status of BGP routes + on the node. + properties: + destination: + description: Destination of the route. + type: string + gateway: + description: Gateway for the destination. + type: string + interface: + description: Interface for the destination + type: string + learnedFrom: + description: LearnedFrom contains information regarding + where this route originated. + properties: + peerIP: + description: If sourceType is NodeMesh or BGPPeer, IP + address of the router that sent us this route. + type: string + sourceType: + description: Type of the source where a route is learned + from. + type: string + type: object + type: + description: Type indicates if the route is being used for + forwarding or not. + type: string + type: object + type: array + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -384,6 +722,7 @@ spec: listKind: ClusterInformationList plural: clusterinformations singular: clusterinformation + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -435,8 +774,8 @@ status: plural: "" conditions: [] storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -448,6 +787,7 @@ spec: listKind: FelixConfigurationList plural: felixconfigurations singular: felixconfiguration + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -482,7 +822,7 @@ spec: type: boolean awsSrcDstCheck: description: 'Set source-destination-check on AWS EC2 instances. Accepted - value must be one of "DoNothing", "Enabled" or "Disabled". [Default: + value must be one of "DoNothing", "Enable" or "Disable". [Default: DoNothing]' enum: - DoNothing @@ -516,6 +856,18 @@ spec: description: 'BPFEnabled, if enabled Felix will use the BPF dataplane. [Default: false]' type: boolean + bpfEnforceRPF: + description: 'BPFEnforceRPF enforce strict RPF on all interfaces with + BPF programs regardless of what is the per-interfaces or global + setting. Possible values are Disabled or Strict. [Default: Strict]' + type: string + bpfExtToServiceConnmark: + description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit + mark that is set on connections from an external client to a local + service. This mark allows us to control how packets of that connection + are routed within the host and how is routing interpreted by RPF + check. [Default: 0]' + type: integer bpfExternalServiceMode: description: 'BPFExternalServiceMode in BPF mode, controls how connections from outside the cluster to services (node ports and cluster IPs) @@ -526,14 +878,11 @@ spec: node appears to use the IP of the ingress node; this requires a permissive L2 network. [Default: Tunnel]' type: string - bpfExtToServiceConnmark: - description: 'BPFExtToServiceConnmark in BPF mode, controls a - 32bit mark that is set on connections from an external client to - a local service. This mark allows us to control how packets of - that connection are routed within the host and how is routing - interpreted by RPF check. 
[Default: 0]' - type: integer - + bpfHostConntrackBypass: + description: 'BPFHostConntrackBypass Controls whether to bypass Linux + conntrack in BPF mode for workloads and services. [Default: true + - bypass Linux conntrack]' + type: boolean bpfKubeProxyEndpointSlicesEnabled: description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls whether Felix's embedded kube-proxy accepts EndpointSlices or not. @@ -556,6 +905,61 @@ spec: logs are emitted to the BPF trace pipe, accessible with the command `tc exec bpf debug`. [Default: Off].' type: string + bpfMapSizeConntrack: + description: 'BPFMapSizeConntrack sets the size for the conntrack + map. This map must be large enough to hold an entry for each active + connection. Warning: changing the size of the conntrack map can + cause disruption.' + type: integer + bpfMapSizeIPSets: + description: BPFMapSizeIPSets sets the size for ipsets map. The IP + sets map must be large enough to hold an entry for each endpoint + matched by every selector in the source/destination matches in network + policy. Selectors such as "all()" can result in large numbers of + entries (one entry per endpoint in that case). + type: integer + bpfMapSizeIfState: + description: BPFMapSizeIfState sets the size for ifstate map. The + ifstate map must be large enough to hold an entry for each device + (host + workloads) on a host. + type: integer + bpfMapSizeNATAffinity: + type: integer + bpfMapSizeNATBackend: + description: BPFMapSizeNATBackend sets the size for nat back end map. + This is the total number of endpoints. This is mostly more than + the size of the number of services. + type: integer + bpfMapSizeNATFrontend: + description: BPFMapSizeNATFrontend sets the size for nat front end + map. FrontendMap should be large enough to hold an entry for each + nodeport, external IP and each port in each service. + type: integer + bpfMapSizeRoute: + description: BPFMapSizeRoute sets the size for the routes map. The + routes map should be large enough to hold one entry per workload + and a handful of entries per host (enough to cover its own IPs and + tunnel IPs). + type: integer + bpfPSNATPorts: + anyOf: + - type: integer + - type: string + description: 'BPFPSNATPorts sets the range from which we randomly + pick a port if there is a source port collision. This should be + within the ephemeral range as defined by RFC 6056 (1024–65535) and + preferably outside the ephemeral ranges used by common operating + systems. Linux uses 32768–60999, while others mostly use the IANA + defined range 49152–65535. It is not necessarily a problem if this + range overlaps with the operating systems. Both ends of the range + are inclusive. [Default: 20000:29999]' + pattern: ^.* + x-kubernetes-int-or-string: true + bpfPolicyDebugEnabled: + description: BPFPolicyDebugEnabled when true, Felix records detailed + information about the BPF policy programs, which can be examined + with the calico-bpf command-line tool. + type: boolean chainInsertMode: description: 'ChainInsertMode controls whether Felix hooks the kernel''s top-level iptables chains by inserting a rule at the top of the @@ -566,6 +970,15 @@ spec: Calico policy will be bypassed. [Default: insert]' type: string dataplaneDriver: + description: DataplaneDriver filename of the external dataplane driver + to use. Only used if UseInternalDataplaneDriver is set to false. + type: string + dataplaneWatchdogTimeout: + description: 'DataplaneWatchdogTimeout is the readiness/liveness timeout + used for Felix''s (internal) dataplane driver. 
Increase this value + if you experience spurious non-ready or non-live events when Felix + is under heavy load. Decrease the value to get felix to report non-live + or non-ready more quickly. [Default: 90s]' type: string debugDisableLogDropping: type: boolean @@ -594,9 +1007,14 @@ spec: routes, by default this will be RTPROT_BOOT when left blank. type: integer deviceRouteSourceAddress: - description: This is the source address to use on programmed device - routes. By default the source address is left blank, leaving the - kernel to choose the source address used. + description: This is the IPv4 source address to use on programmed + device routes. By default the source address is left blank, leaving + the kernel to choose the source address used. + type: string + deviceRouteSourceAddressIPv6: + description: This is the IPv6 source address to use on programmed + device routes. By default the source address is left blank, leaving + the kernel to choose the source address used. type: string disableConntrackInvalidCheck: type: boolean @@ -670,6 +1088,13 @@ spec: "true" or "false" will force the feature, empty or omitted values are auto-detected. type: string + floatingIPs: + description: FloatingIPs configures whether or not Felix will program + floating IP addresses. + enum: + - Enabled + - Disabled + type: string genericXDPEnabled: description: 'GenericXDPEnabled enables Generic XDP so network cards that don''t support XDP offload or driver modes can use XDP. This @@ -707,6 +1132,9 @@ spec: disabled by setting the interval to 0. type: string ipipEnabled: + description: 'IPIPEnabled overrides whether Felix should configure + an IPIP interface on the host. Optional as Felix determines this + based on the existing IP pools. [Default: nil (unset)]' type: boolean ipipMTU: description: 'IPIPMTU is the MTU to set on the tunnel device. See @@ -773,6 +1201,8 @@ spec: usage. [Default: 10s]' type: string ipv6Support: + description: IPv6Support controls whether Felix enables support for + IPv6 (if supported by the in-use dataplane). type: boolean kubeNodePortRanges: description: 'KubeNodePortRanges holds list of port ranges used for @@ -786,6 +1216,12 @@ spec: pattern: ^.* x-kubernetes-int-or-string: true type: array + logDebugFilenameRegex: + description: LogDebugFilenameRegex controls which source code files + have their Debug log output included in the logs. Only logs from + files with names that match the given regular expression are included. The + filter only applies to Debug level logs. + type: string logFilePath: description: 'LogFilePath is the full path to the Felix log. Set to none to disable file logging. [Default: /var/log/calico/felix.log]' @@ -882,6 +1318,12 @@ spec: to false. This reduces the number of metrics reported, reducing Prometheus load. [Default: true]' type: boolean + prometheusWireGuardMetricsEnabled: + description: 'PrometheusWireGuardMetricsEnabled disables wireguard + metrics collection, which the Prometheus client does by default, + when set to false. This reduces the number of metrics reported, + reducing Prometheus load. [Default: true]' + type: boolean removeExternalRoutes: description: Whether or not to remove device routes that have not been programmed by Felix. Disabling this will allow external applications @@ -908,10 +1350,14 @@ spec: information. - WorkloadIPs: use workload endpoints to construct routes. - CalicoIPAM: the default - use IPAM data to construct routes.' 
type: string + routeSyncDisabled: + description: RouteSyncDisabled will disable all operations performed + on the route table. Set to true to run in network-policy mode only. + type: boolean routeTableRange: - description: Calico programs additional Linux route tables for various - purposes. RouteTableRange specifies the indices of the route tables - that Calico should use. + description: Deprecated in favor of RouteTableRanges. Calico programs + additional Linux route tables for various purposes. RouteTableRange + specifies the indices of the route tables that Calico should use. properties: max: type: integer @@ -921,6 +1367,21 @@ spec: - max - min type: object + routeTableRanges: + description: Calico programs additional Linux route tables for various + purposes. RouteTableRanges specifies a set of table index ranges + that Calico should use. Deprecates`RouteTableRange`, overrides `RouteTableRange`. + items: + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + type: array serviceLoopPrevention: description: 'When service IP advertisement is enabled, prevent routing loops to service IPs that are not in use, by dropping or rejecting @@ -948,37 +1409,79 @@ spec: Felix makes reports. [Default: 86400s]' type: string useInternalDataplaneDriver: + description: UseInternalDataplaneDriver, if true, Felix will use its + internal dataplane programming logic. If false, it will launch + an external dataplane driver and communicate with it over protobuf. type: boolean vxlanEnabled: + description: 'VXLANEnabled overrides whether Felix should create the + VXLAN tunnel device for IPv4 VXLAN networking. Optional as Felix + determines this based on the existing IP pools. [Default: nil (unset)]' type: boolean vxlanMTU: - description: 'VXLANMTU is the MTU to set on the tunnel device. See - Configuring MTU [Default: 1440]' + description: 'VXLANMTU is the MTU to set on the IPv4 VXLAN tunnel + device. See Configuring MTU [Default: 1410]' + type: integer + vxlanMTUV6: + description: 'VXLANMTUV6 is the MTU to set on the IPv6 VXLAN tunnel + device. See Configuring MTU [Default: 1390]' type: integer vxlanPort: type: integer vxlanVNI: type: integer wireguardEnabled: - description: 'WireguardEnabled controls whether Wireguard is enabled. + description: 'WireguardEnabled controls whether Wireguard is enabled + for IPv4 (encapsulating IPv4 traffic over an IPv4 underlay network). [Default: false]' type: boolean + wireguardEnabledV6: + description: 'WireguardEnabledV6 controls whether Wireguard is enabled + for IPv6 (encapsulating IPv6 traffic over an IPv6 underlay network). + [Default: false]' + type: boolean + wireguardHostEncryptionEnabled: + description: 'WireguardHostEncryptionEnabled controls whether Wireguard + host-to-host encryption is enabled. [Default: false]' + type: boolean wireguardInterfaceName: description: 'WireguardInterfaceName specifies the name to use for - the Wireguard interface. [Default: wg.calico]' + the IPv4 Wireguard interface. [Default: wireguard.cali]' + type: string + wireguardInterfaceNameV6: + description: 'WireguardInterfaceNameV6 specifies the name to use for + the IPv6 Wireguard interface. [Default: wg-v6.cali]' + type: string + wireguardKeepAlive: + description: 'WireguardKeepAlive controls Wireguard PersistentKeepalive + option. Set 0 to disable. [Default: 0]' type: string wireguardListeningPort: description: 'WireguardListeningPort controls the listening port used - by Wireguard. [Default: 51820]' + by IPv4 Wireguard. 
[Default: 51820]' + type: integer + wireguardListeningPortV6: + description: 'WireguardListeningPortV6 controls the listening port + used by IPv6 Wireguard. [Default: 51821]' type: integer wireguardMTU: - description: 'WireguardMTU controls the MTU on the Wireguard interface. - See Configuring MTU [Default: 1420]' + description: 'WireguardMTU controls the MTU on the IPv4 Wireguard + interface. See Configuring MTU [Default: 1440]' + type: integer + wireguardMTUV6: + description: 'WireguardMTUV6 controls the MTU on the IPv6 Wireguard + interface. See Configuring MTU [Default: 1420]' type: integer wireguardRoutingRulePriority: description: 'WireguardRoutingRulePriority controls the priority value to use for the Wireguard routing rule. [Default: 99]' type: integer + workloadSourceSpoofing: + description: WorkloadSourceSpoofing controls whether pods can use + the allowedSourcePrefixes annotation to send traffic with a source + IP address that is not theirs. This is disabled by default. When + set to "Any", pods can request any prefix. + type: string xdpEnabled: description: 'XDPEnabled enables XDP acceleration for suitable untracked incoming deny rules. [Default: true]' @@ -999,8 +1502,8 @@ status: plural: "" conditions: [] storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -1012,6 +1515,7 @@ spec: listKind: GlobalNetworkPolicyList plural: globalnetworkpolicies singular: globalnetworkpolicy + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -1172,8 +1676,8 @@ spec: within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, - Ports, NotPorts, Nets, NotNets or ServiceAccounts. \n - Only valid on egress rules." + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes @@ -1398,8 +1902,8 @@ spec: within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, - Ports, NotPorts, Nets, NotNets or ServiceAccounts. \n - Only valid on egress rules." + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes @@ -1545,8 +2049,8 @@ spec: within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, - Ports, NotPorts, Nets, NotNets or ServiceAccounts. \n - Only valid on egress rules." + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes @@ -1771,8 +2275,8 @@ spec: within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, - Ports, NotPorts, Nets, NotNets or ServiceAccounts. \n - Only valid on egress rules." + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." 
properties: name: description: Name specifies the name of a Kubernetes @@ -1854,8 +2358,8 @@ status: plural: "" conditions: [] storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -1867,6 +2371,7 @@ spec: listKind: GlobalNetworkSetList plural: globalnetworksets singular: globalnetworkset + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -1907,8 +2412,8 @@ status: plural: "" conditions: [] storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -1920,6 +2425,7 @@ spec: listKind: HostEndpointList plural: hostendpoints singular: hostendpoint + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -2015,8 +2521,8 @@ status: plural: "" conditions: [] storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -2028,6 +2534,7 @@ spec: listKind: IPAMBlockList plural: ipamblocks singular: ipamblock + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -2051,8 +2558,16 @@ spec: resource. properties: affinity: + description: Affinity of the block, if this block has one. If set, + it will be of the form "host:". If not set, this block + is not affine to a host. type: string allocations: + description: Array of allocations in-use within this block. nil entries + mean the allocation is free. For non-nil entries at index i, the + index is the ordinal of the allocation within this block and the + value is the index of the associated attributes in the Attributes + array. items: type: integer # TODO: This nullable is manually added in. We should update controller-gen @@ -2060,6 +2575,10 @@ spec: nullable: true type: array attributes: + description: Attributes is an array of arbitrary metadata associated + with allocations in the block. To find attributes for a given allocation, + use the value of the allocation's entry in the Allocations array + as the index of the element in this array. items: properties: handle_id: @@ -2071,12 +2590,38 @@ spec: type: object type: array cidr: + description: The block's CIDR. type: string deleted: + description: Deleted is an internal boolean used to workaround a limitation + in the Kubernetes API whereby deletion will not return a conflict + error if the block has been updated. It should not be set manually. type: boolean + sequenceNumber: + default: 0 + description: We store a sequence number that is updated each time + the block is written. Each allocation will also store the sequence + number of the block at the time of its creation. When releasing + an IP, passing the sequence number associated with the allocation + allows us to protect against a race condition and ensure the IP + hasn't been released and re-allocated since the release request. + format: int64 + type: integer + sequenceNumberForAllocation: + additionalProperties: + format: int64 + type: integer + description: Map of allocated ordinal within the block to sequence + number of the block at the time of allocation. Kubernetes does not + allow numerical keys for maps, so the key is cast to a string. + type: object strictAffinity: + description: StrictAffinity on the IPAMBlock is deprecated and no + longer used by the code. Use IPAMConfig StrictAffinity instead. type: boolean unallocated: + description: Unallocated is an ordered list of allocations which are + free in the block. 
items: type: integer type: array @@ -2096,8 +2641,8 @@ status: plural: "" conditions: [] storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -2109,6 +2654,7 @@ spec: listKind: IPAMConfigList plural: ipamconfigs singular: ipamconfig + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -2136,6 +2682,8 @@ spec: maxBlocksPerHost: description: MaxBlocksPerHost, if non-zero, is the max number of blocks that can be affine to each host. + maximum: 2147483647 + minimum: 0 type: integer strictAffinity: type: boolean @@ -2152,8 +2700,8 @@ status: plural: "" conditions: [] storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -2165,6 +2713,7 @@ spec: listKind: IPAMHandleList plural: ipamhandles singular: ipamhandle + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -2208,8 +2757,8 @@ status: plural: "" conditions: [] storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -2221,6 +2770,7 @@ spec: listKind: IPPoolList plural: ippools singular: ippool + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -2242,13 +2792,23 @@ spec: spec: description: IPPoolSpec contains the specification for an IPPool resource. properties: + allowedUses: + description: AllowedUse controls what the IP pool will be used for. If + not specified or empty, defaults to ["Tunnel", "Workload"] for back-compatibility + items: + type: string + type: array blockSize: description: The block size to use for IP address assignments from - this pool. Defaults to 26 for IPv4 and 112 for IPv6. + this pool. Defaults to 26 for IPv4 and 122 for IPv6. type: integer cidr: description: The pool CIDR. type: string + disableBGPExport: + description: 'Disable exporting routes from this IP Pool''s CIDR over + BGP. [Default: false]' + type: boolean disabled: description: When disabled is true, Calico IPAM will not assign addresses from this pool. @@ -2282,7 +2842,7 @@ spec: for internal use only.' type: boolean natOutgoing: - description: When nat-outgoing is true, packets sent from Calico networked + description: When natOutgoing is true, packets sent from Calico networked containers in this pool to destinations outside of this pool will be masqueraded. type: boolean @@ -2307,8 +2867,63 @@ status: plural: "" conditions: [] storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: ipreservations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPReservation + listKind: IPReservationList + plural: ipreservations + singular: ipreservation + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPReservationSpec contains the specification for an IPReservation + resource. + properties: + reservedCIDRs: + description: ReservedCIDRs is a list of CIDRs and/or IP addresses + that Calico IPAM will exclude from new allocations. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -2320,6 +2935,7 @@ spec: listKind: KubeControllersConfigurationList plural: kubecontrollersconfigurations singular: kubecontrollersconfiguration + preserveUnknownFields: false scope: Cluster versions: - name: v1 @@ -2410,6 +3026,11 @@ spec: type: string type: object type: object + debugProfilePort: + description: DebugProfilePort configures the port to serve memory + and cpu profiles on. If not specified, profiling is disabled. + format: int32 + type: integer etcdV3CompactionPeriod: description: 'EtcdV3CompactionPeriod is the period between etcdv3 compaction requests. Set to 0 to disable. [Default: 10m]' @@ -2520,6 +3141,11 @@ spec: type: string type: object type: object + debugProfilePort: + description: DebugProfilePort configures the port to serve memory + and cpu profiles on. If not specified, profiling is disabled. + format: int32 + type: integer etcdV3CompactionPeriod: description: 'EtcdV3CompactionPeriod is the period between etcdv3 compaction requests. Set to 0 to disable. [Default: 10m]' @@ -2550,8 +3176,8 @@ status: plural: "" conditions: [] storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -2563,6 +3189,7 @@ spec: listKind: NetworkPolicyList plural: networkpolicies singular: networkpolicy + preserveUnknownFields: false scope: Namespaced versions: - name: v1 @@ -2712,8 +3339,8 @@ spec: within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, - Ports, NotPorts, Nets, NotNets or ServiceAccounts. \n - Only valid on egress rules." + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes @@ -2938,8 +3565,8 @@ spec: within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, - Ports, NotPorts, Nets, NotNets or ServiceAccounts. \n - Only valid on egress rules." + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes @@ -3085,8 +3712,8 @@ spec: within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, - Ports, NotPorts, Nets, NotNets or ServiceAccounts. \n - Only valid on egress rules." + Nets, NotNets or ServiceAccounts. 
\n Ports and NotPorts + can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes @@ -3311,8 +3938,8 @@ spec: within the selected service(s) will be matched, and only to/from each endpoint's port. \n Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, - Ports, NotPorts, Nets, NotNets or ServiceAccounts. \n - Only valid on egress rules." + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." properties: name: description: Name specifies the name of a Kubernetes @@ -3386,8 +4013,8 @@ status: plural: "" conditions: [] storedVersions: [] - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -3399,6 +4026,7 @@ spec: listKind: NetworkSetList plural: networksets singular: networkset + preserveUnknownFields: false scope: Namespaced versions: - name: v1 @@ -3437,11 +4065,8 @@ status: plural: "" conditions: [] storedVersions: [] - ---- --- # Source: calico/templates/calico-kube-controllers-rbac.yaml - # Include a clusterrole for the kube-controllers component, # and bind it to the calico-kube-controllers serviceaccount. kind: ClusterRole @@ -3465,10 +4090,10 @@ rules: - get - list - watch - # IPAM resources are manipulated when nodes are deleted. + # IPAM resources are manipulated in response to node and block updates, as well as periodic triggers. - apiGroups: ["crd.projectcalico.org"] resources: - - ippools + - ipreservations verbs: - list - apiGroups: ["crd.projectcalico.org"] @@ -3483,6 +4108,13 @@ rules: - update - delete - watch + # Pools are watched to maintain a mapping of blocks to IP pools. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + verbs: + - list + - watch # kube-controllers manages hostendpoints. - apiGroups: ["crd.projectcalico.org"] resources: @@ -3499,8 +4131,10 @@ rules: - clusterinformations verbs: - get + - list - create - update + - watch # KubeControllersConfiguration is where it gets its config - apiGroups: ["crd.projectcalico.org"] resources: @@ -3514,21 +4148,6 @@ rules: - update # watch for changes - watch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: calico-kube-controllers -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-kube-controllers -subjects: -- kind: ServiceAccount - name: calico-kube-controllers - namespace: kube-system ---- - --- # Source: calico/templates/calico-node-rbac.yaml # Include a clusterrole for the calico-node DaemonSet, @@ -3538,6 +4157,14 @@ apiVersion: rbac.authorization.k8s.io/v1 metadata: name: calico-node rules: + # Used for creating service account tokens to be used by the CNI plugin + - apiGroups: [""] + resources: + - serviceaccounts/token + resourceNames: + - calico-node + verbs: + - create # The CNI plugin needs to get pods, nodes, and namespaces. - apiGroups: [""] resources: @@ -3609,6 +4236,7 @@ rules: - globalbgpconfigs - bgpconfigurations - ippools + - ipreservations - ipamblocks - globalnetworkpolicies - globalnetworksets @@ -3617,6 +4245,7 @@ rules: - clusterinformations - hostendpoints - blockaffinities + - caliconodestatuses verbs: - get - list @@ -3630,6 +4259,12 @@ rules: verbs: - create - update + # Calico must update some CRDs. + - apiGroups: [ "crd.projectcalico.org" ] + resources: + - caliconodestatuses + verbs: + - update # Calico stores some configuration information on the node. 
- apiGroups: [""] resources: @@ -3659,11 +4294,14 @@ rules: - create - update - delete + # The CNI plugin and calico/node need to be able to create a default + # IPAMConfiguration - apiGroups: ["crd.projectcalico.org"] resources: - ipamconfigs verbs: - get + - create # Block affinities must also be watchable by confd for route aggregation. - apiGroups: ["crd.projectcalico.org"] resources: @@ -3677,8 +4315,22 @@ rules: - daemonsets verbs: - get - --- +# Source: calico/templates/calico-kube-controllers-rbac.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system +--- +# Source: calico/templates/calico-node-rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -3691,7 +4343,6 @@ subjects: - kind: ServiceAccount name: calico-node namespace: kube-system - --- # Source: calico/templates/calico-node.yaml # This manifest installs the calico-node container, as well @@ -3740,6 +4391,7 @@ spec: # upgraded to use calico-ipam. - name: upgrade-ipam image: {{ .BinaryImageName }} + imagePullPolicy: IfNotPresent command: ["/opt/cni/bin/calico-ipam", "-upgrade"] envFrom: - configMapRef: @@ -3767,6 +4419,7 @@ spec: # and CNI network config file on each node. - name: install-cni image: {{ .BinaryImageName }} + imagePullPolicy: IfNotPresent command: ["/opt/cni/bin/install"] envFrom: - configMapRef: @@ -3804,13 +4457,29 @@ spec: name: cni-net-dir securityContext: privileged: true - # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes - # to communicate with Felix over the Policy Sync API. - - name: flexvol-driver - image: {{ .FelixDriverImageName }} + # This init container mounts the necessary filesystems needed by the BPF data plane + # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed + # in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode. + - name: "mount-bpffs" + image: {{ .DaemonSetImageName }} + imagePullPolicy: IfNotPresent + command: ["calico-node", "-init", "-best-effort"] volumeMounts: - - name: flexvol-driver-host - mountPath: /host/driver + - mountPath: /sys/fs + name: sys-fs + # Bidirectional is required to ensure that the new mount we make at /sys/fs/bpf propagates to the host + # so that it outlives the init container. + mountPropagation: Bidirectional + - mountPath: /var/run/calico + name: var-run-calico + # Bidirectional is required to ensure that the new mount we make at /run/calico/cgroup propagates to the host + # so that it outlives the init container. + mountPropagation: Bidirectional + # Mount /proc/ from host which usually is an init program at /nodeproc. It's needed by mountns binary, + # executed by calico-node, to mount root cgroup2 fs at /run/calico/cgroup to attach CTLB programs correctly. + - mountPath: /nodeproc + name: nodeproc + readOnly: true securityContext: privileged: true containers: @@ -3819,6 +4488,7 @@ spec: # host. - name: calico-node image: {{ .DaemonSetImageName }} + imagePullPolicy: IfNotPresent envFrom: - configMapRef: # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. @@ -3854,6 +4524,9 @@ spec: # Enable or Disable VXLAN on the default IP pool. 
- name: CALICO_IPV4POOL_VXLAN value: "Never" + # Enable or Disable VXLAN on the default IPv6 IP pool. + - name: CALICO_IPV6POOL_VXLAN + value: "Never" # Set MTU for tunnel device used if ipip is enabled - name: FELIX_IPINIPMTU valueFrom: @@ -3877,6 +4550,8 @@ spec: # no effect. This should fall within `--cluster-cidr`. # - name: CALICO_IPV4POOL_CIDR # value: "192.168.0.0/16" + - name: CALICO_IPV4POOL_CIDR + value: {{ .PodCIDR }} # Disable file logging so `kubectl logs` works. - name: CALICO_DISABLE_FILE_LOGGING value: "true" @@ -3893,6 +4568,12 @@ spec: resources: requests: cpu: 250m + lifecycle: + preStop: + exec: + command: + - /bin/calico-node + - -shutdown livenessProbe: exec: command: @@ -3932,11 +4613,8 @@ spec: mountPath: /var/run/nodeagent # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the # parent directory. - - name: sysfs - mountPath: /sys/fs/ - # Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host. - # If the host is known to mount that filesystem already then Bidirectional can be omitted. - mountPropagation: Bidirectional + - name: bpffs + mountPath: /sys/fs/bpf - name: cni-log-dir mountPath: /var/log/calico/cni readOnly: true @@ -3955,10 +4633,18 @@ spec: hostPath: path: /run/xtables.lock type: FileOrCreate - - name: sysfs + - name: sys-fs hostPath: path: /sys/fs/ type: DirectoryOrCreate + - name: bpffs + hostPath: + path: /sys/fs/bpf + type: Directory + # mount /proc at /nodeproc to be used by mount-bpffs initContainer to mount root cgroup2 fs. + - name: nodeproc + hostPath: + path: /proc # Used to install CNI. - name: cni-bin-dir hostPath: @@ -3981,19 +4667,6 @@ spec: hostPath: type: DirectoryOrCreate path: /var/run/nodeagent - # Used to install Flex Volume Driver - - name: flexvol-driver-host - hostPath: - type: DirectoryOrCreate - path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-node - namespace: kube-system - --- # Source: calico/templates/calico-kube-controllers.yaml # See https://github.com/projectcalico/kube-controllers @@ -4027,11 +4700,14 @@ spec: operator: Exists - key: node-role.kubernetes.io/master effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule serviceAccountName: calico-kube-controllers priorityClassName: system-cluster-critical containers: - name: calico-kube-controllers image: {{ .DeploymentImageName }} + imagePullPolicy: IfNotPresent env: # Choose which controllers to run. 
- name: ENABLED_CONTROLLERS @@ -4053,38 +4729,3 @@ spec: - /usr/bin/check-status - -r periodSeconds: 10 - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-kube-controllers - namespace: kube-system - ---- - -# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict - -apiVersion: policy/v1{{if .LegacyPodDisruptionBudget}}beta1{{end}} -kind: PodDisruptionBudget -metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers -spec: - maxUnavailable: 1 - selector: - matchLabels: - k8s-app: calico-kube-controllers - ---- -# Source: calico/templates/calico-etcd-secrets.yaml - ---- -# Source: calico/templates/calico-typha.yaml - ---- -# Source: calico/templates/configure-canal.yaml - diff --git a/pkg/minikube/cni/cni.go b/pkg/minikube/cni/cni.go index ca0d767081..03c9a7988a 100644 --- a/pkg/minikube/cni/cni.go +++ b/pkg/minikube/cni/cni.go @@ -20,8 +20,11 @@ package cni import ( "context" "fmt" + "net" "os/exec" "path" + "path/filepath" + "strings" "time" "github.com/blang/semver/v4" @@ -43,20 +46,6 @@ const ( // DefaultConfDir is the default CNI Config Directory path DefaultConfDir = "/etc/cni/net.d" - // CustomConfDir is the custom CNI Config Directory path used to avoid conflicting CNI configs - // ref: https://github.com/kubernetes/minikube/issues/10984 and https://github.com/kubernetes/minikube/pull/11106 - CustomConfDir = "/etc/cni/net.mk" -) - -var ( - // ConfDir is the CNI Config Directory path that can be customised, defaulting to DefaultConfDir - ConfDir = DefaultConfDir - - // Network is the network name that CNI should use (eg, "kindnet"). - // Currently, only crio (and podman) can use it, so that setting custom ConfDir is not necessary. - // ref: https://github.com/cri-o/cri-o/issues/2121 (and https://github.com/containers/podman/issues/2370) - // ref: https://github.com/cri-o/cri-o/blob/master/docs/crio.conf.5.md#crionetwork-table - Network = "" ) // Runner is the subset of command.Runner this package consumes @@ -115,10 +104,6 @@ func New(cc *config.ClusterConfig) (Manager, error) { cnm, err = NewCustom(*cc, cc.KubernetesConfig.CNI) } - if err := configureCNI(cc, cnm); err != nil { - klog.Errorf("unable to set CNI Config Directory: %v", err) - } - return cnm, err } @@ -145,11 +130,6 @@ func chooseDefault(cc config.ClusterConfig) Manager { return Bridge{} } - if driver.BareMetal(cc.Driver) { - klog.Infof("Driver %s used, CNI unnecessary in this configuration, recommending no CNI", cc.Driver) - return Disabled{cc: cc} - } - if len(cc.Nodes) > 1 || cc.MultiNodeRequested { // Enables KindNet CNI in master in multi node cluster, This solves the network problem // inside pod for multi node clusters. See https://github.com/kubernetes/minikube/issues/9838. 
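The next hunk gates CNI selection on the Kubernetes version: with the Docker runtime on k8s v1.24+ (where dockershim and kubenet were removed), chooseDefault now recommends the bridge CNI. A minimal stand-alone sketch of that version gate, assuming only the github.com/blang/semver/v4 dependency this package already imports; needsBridgeFallback is a hypothetical helper for illustration, not minikube's actual util.ParseKubernetesVersion:

package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

// needsBridgeFallback reports whether a Docker-runtime cluster on this
// Kubernetes version should fall back to the bridge CNI (dockershim and
// kubenet were removed in v1.24).
func needsBridgeFallback(kubernetesVersion string) bool {
	kv, err := semver.ParseTolerant(kubernetesVersion) // tolerates a leading "v"
	if err != nil {
		return false // unknown version: keep the existing default behaviour
	}
	return kv.GTE(semver.MustParse("1.24.0-alpha.2"))
}

func main() {
	for _, v := range []string{"v1.23.3", "v1.24.0", "v1.26.1"} {
		fmt.Printf("%s -> bridge fallback: %t\n", v, needsBridgeFallback(v))
	}
}

Pre-release ordering makes "1.24.0-alpha.2" compare lower than "1.24.0", so GA v1.24 releases take the bridge path as well.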
@@ -159,10 +139,22 @@ func chooseDefault(cc config.ClusterConfig) Manager { if cc.KubernetesConfig.ContainerRuntime != constants.Docker { if driver.IsKIC(cc.Driver) { - klog.Infof("%q driver + %s runtime found, recommending kindnet", cc.Driver, cc.KubernetesConfig.ContainerRuntime) + klog.Infof("%q driver + %q runtime found, recommending kindnet", cc.Driver, cc.KubernetesConfig.ContainerRuntime) return KindNet{cc: cc} } - klog.Infof("%q driver + %s runtime found, recommending bridge", cc.Driver, cc.KubernetesConfig.ContainerRuntime) + klog.Infof("%q driver + %q runtime found, recommending bridge", cc.Driver, cc.KubernetesConfig.ContainerRuntime) + return Bridge{cc: cc} + } + + // for the docker container runtime and k8s v1.24+, where dockershim and kubenet were removed, we fall back to bridge cni for cri-docker(d) + // ref: https://github.com/Mirantis/cri-dockerd#important + // ref: https://github.com/Mirantis/cri-dockerd#to-use-with-kubernetes + // note: currently, default cni that we "distribute" (in /etc/cni/net.d) is based on cri-o bridge, and + // because it does not currently use portmap plugin, we pick "our" bridge instead (cri-o one will be disabled automatically) + // ref: https://github.com/cri-o/cri-o/blob/f317b267ddef21aee5ffc92d890a77112b006815/contrib/cni/10-crio-bridge.conflist + kv, err := util.ParseKubernetesVersion(cc.KubernetesConfig.KubernetesVersion) + if err == nil && kv.GTE(semver.MustParse("1.24.0-alpha.2")) { + klog.Infof("%q driver + %q container runtime found on kubernetes v1.24+, recommending bridge", cc.Driver, cc.KubernetesConfig.ContainerRuntime) return Bridge{cc: cc} } @@ -200,41 +192,118 @@ func applyManifest(cc config.ClusterConfig, r Runner, f assets.CopyableFile) err return nil } -// configureCNI - to avoid conflicting CNI configs, it sets: -// - for crio: 'cni_default_network' config param via cni.Network -// - for containerd and docker: kubelet's '--cni-conf-dir' flag to custom CNI Config Directory path (same used also by CNI Deployment). -// ref: https://github.com/kubernetes/minikube/issues/10984 and https://github.com/kubernetes/minikube/pull/11106 -// Note: currently, this change affects only Kindnet CNI (and all multinodes using it), but it can be easily expanded to other/all CNIs if needed. -// Note2: Cilium does not need workaround as they automatically restart pods after CNI is successfully deployed.
-func configureCNI(cc *config.ClusterConfig, cnm Manager) error { - if _, kindnet := cnm.(KindNet); kindnet { - // crio only needs CNI network name; hopefully others (containerd, docker and kubeadm/kubelet) will follow eventually - if cc.KubernetesConfig.ContainerRuntime == constants.CRIO { - Network = "kindnet" - return nil - } - version, err := util.ParseKubernetesVersion(cc.KubernetesConfig.KubernetesVersion) - if err != nil { - return err - } - // The CNI configuration is handled by CRI in 1.24+ - if version.LT(semver.MustParse("1.24.0-alpha.2")) { - // for containerd and docker: auto-set custom CNI via kubelet's 'cni-conf-dir' param, if not user-specified - eo := fmt.Sprintf("kubelet.cni-conf-dir=%s", CustomConfDir) - if !cc.KubernetesConfig.ExtraOptions.Exists(eo) { - klog.Infof("auto-setting extra-config to %q", eo) - if err := cc.KubernetesConfig.ExtraOptions.Set(eo); err != nil { - return fmt.Errorf("failed auto-setting extra-config %q: %v", eo, err) - } - ConfDir = CustomConfDir - klog.Infof("extra-config set to %q", eo) - } else { - // respect user-specified custom CNI Config Directory - ConfDir = cc.KubernetesConfig.ExtraOptions.Get("cni-conf-dir", "kubelet") - } - } else { - ConfDir = CustomConfDir - } +// ConfigureLoopbackCNI configures loopback cni. +// If disable is true, it sets the extension of its config file in /etc/cni/net.d to "mk_disabled". +// Otherwise, it ensures the loopback cni has the expected version ("1.0.0") and a valid name ("loopback") in its config file in /etc/cni/net.d. +// Note: cri-o currently leaves out the name (https://github.com/cri-o/cri-o/pull/6273). +// This avoids errors like: +// - Failed to create pod sandbox: rpc error: code = Unknown desc = [failed to set up sandbox container "..." network for pod "...": networkPlugin cni failed to set up pod "..." network: missing network name:, +// - failed to clean up sandbox container "..." network for pod "...": networkPlugin cni failed to teardown pod "..." network: missing network name] +// It is the caller's responsibility to restart the container runtime for these changes to take effect.
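The rewrite described above is performed on the node with find and sed in the function body that follows. Purely as a stand-alone illustration of the intended transformation - not code the patch runs - a rough in-memory Go equivalent:

package main

import (
	"encoding/json"
	"fmt"
)

// patchLoopbackConf adds the missing "name" field and pins cniVersion,
// mirroring what the sed invocation below does to a 200-loopback.conf.
func patchLoopbackConf(raw []byte) ([]byte, error) {
	conf := map[string]interface{}{}
	if err := json.Unmarshal(raw, &conf); err != nil {
		return nil, err
	}
	if _, ok := conf["name"]; !ok {
		conf["name"] = "loopback" // cri-o omits the name; the CNI spec requires one
	}
	conf["cniVersion"] = "1.0.0" // version expected by current runtimes
	return json.MarshalIndent(conf, "", "  ")
}

func main() {
	out, err := patchLoopbackConf([]byte(`{ "cniVersion": "0.3.1", "type": "loopback" }`))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}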
+func ConfigureLoopbackCNI(r Runner, disable bool) error { + loopback := "/etc/cni/net.d/*loopback.conf*" // usually: 200-loopback.conf + // turn { "cniVersion": "0.3.1", "type": "loopback" } + // into { "cniVersion": "0.3.1", "name": "loopback", "type": "loopback" } + if _, err := r.RunCmd(exec.Command("sh", "-c", fmt.Sprintf("stat %s", loopback))); err != nil { + klog.Warningf("loopback cni configuration skipped: %q not found", loopback) + return nil + } + + findExec := []string{"find", filepath.Dir(loopback), "-maxdepth", "1", "-type", "f", "-name", filepath.Base(loopback), "-not", "-name", "*.mk_disabled", "-exec", "sh", "-c"} + + if disable { + if _, err := r.RunCmd(exec.Command( + "sudo", append(findExec, + `sudo mv {} {}.mk_disabled`, ";")...)); err != nil { + return fmt.Errorf("unable to disable loopback cni %q: %v", loopback, err) + } + klog.Infof("loopback cni configuration disabled: %q found", loopback) + return nil + } + + if _, err := r.RunCmd(exec.Command( + "sudo", append(findExec, + `grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}`, ";")...)); err != nil { + return fmt.Errorf("unable to patch loopback cni config %q: %v", loopback, err) + } + klog.Infof("loopback cni configuration patched: %q found", loopback) + return nil +} + +// ConfigureDefaultBridgeCNIs configures all default bridge CNIs on a node (designated by runner). +// If a network plugin is set (could be, eg "cni" or "kubenet"), it will disable all default bridges to avoid conflicts. +// Otherwise, it will configure all default bridges to match the DefaultPodCIDR subnet range. +// It's usually called before deploying a new CNI and on node restarts, to avoid conflicts and flip-flopping of pods' ip addresses. +// It is the caller's responsibility to restart the container runtime for these changes to take effect. +func ConfigureDefaultBridgeCNIs(r Runner, networkPlugin string) error { + if networkPlugin != "" { + return disableAllBridgeCNIs(r) + } + return configureAllBridgeCNIs(r, DefaultPodCIDR) +} + +// disableAllBridgeCNIs disables all bridge cnis by changing the extension of all *bridge* config file(s) found in the default location (ie, /etc/cni/net.d) to "mk_disabled". +func disableAllBridgeCNIs(r Runner) error { + path := "/etc/cni/net.d" + + out, err := r.RunCmd(exec.Command( + // for cri-o, we also disable 87-podman.conflist (that does not have 'bridge' in its name) + "sudo", "find", path, "-maxdepth", "1", "-type", "f", "(", "(", "-name", "*bridge*", "-or", "-name", "*podman*", ")", "-and", "-not", "-name", "*.mk_disabled", ")", "-printf", "%p, ", "-exec", "sh", "-c", + `sudo mv {} {}.mk_disabled`, ";")) + if err != nil { + return fmt.Errorf("failed to disable all bridge cni configs in %q: %v", path, err) + } + configs := strings.Trim(out.Stdout.String(), ", ") + if len(configs) == 0 { + klog.Infof("no active bridge cni configs found in %q - nothing to disable", path) + return nil + } + klog.Infof("disabled [%s] bridge cni config(s)", configs) + return nil +} + +// configureAllBridgeCNIs configures all bridge cnis by changing the ip address range to match DefaultPodCIDR in all *bridge* config file(s) found in the default location (ie, /etc/cni/net.d).
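The refs and body of configureAllBridgeCNIs continue below. Its podman half derives the bridge gateway from the pod CIDR by masking the IP down to the network address and then incrementing the last octet; a stand-alone sketch of that calculation, using minikube's default 10.244.0.0/16 as an assumed input (firstHostAddress is a hypothetical helper, not part of the patch):

package main

import (
	"fmt"
	"net"
)

// firstHostAddress returns the first usable host address of an IPv4 subnet,
// which configureAllBridgeCNIs (below) uses as the podman bridge gateway.
func firstHostAddress(cidr string) (net.IP, error) {
	ip, ipnet, err := net.ParseCIDR(cidr)
	if err != nil || ip.To4() == nil {
		return nil, fmt.Errorf("cidr %q is not a valid ipv4 cidr: %v", cidr, err)
	}
	gateway := ip.To4().Mask(ipnet.Mask) // network address, eg 10.244.0.0
	gateway[3]++                         // first host address, eg 10.244.0.1
	return gateway, nil
}

func main() {
	gw, err := firstHostAddress("10.244.0.0/16")
	if err != nil {
		panic(err)
	}
	fmt.Println(gw) // 10.244.0.1
}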
+// ref: https://github.com/containernetworking/cni/blob/main/libcni/conf.go +// ref: https://kubernetes.io/docs/tasks/administer-cluster/migrating-from-dockershim/troubleshooting-cni-plugin-related-errors/ +func configureAllBridgeCNIs(r Runner, cidr string) error { + // non-podman bridge configs: + out, err := r.RunCmd(exec.Command( + "sudo", "find", DefaultConfDir, "-maxdepth", "1", "-type", "f", "-name", "*bridge*", "-not", "-name", "*podman*", "-not", "-name", "*.mk_disabled", "-printf", "%p, ", "-exec", "sh", "-c", + // remove ipv6 entries to avoid "failed to set bridge addr: could not add IP address to \"cni0\": permission denied" + // ref: https://github.com/cri-o/cri-o/issues/3555 + // then also remove trailing comma after ipv4 elements, if any + // ie, this will transform from, eg: + // from: "ranges": [ [{ "subnet": "10.85.0.0/16" }], [{ "subnet": "1100:200::/24" }] ] + // to: "ranges": [ [{ "subnet": "10.244.0.0/16" }] ] + // getting something similar to https://github.com/cri-o/cri-o/blob/main/contrib/cni/11-crio-ipv4-bridge.conflist + fmt.Sprintf(`sudo sed -i -r -e '/"dst": ".*:.*"/d' -e 's|^(.*)"dst": (.*)[,*]$|\1"dst": \2|g' -e '/"subnet": ".*:.*"/d' -e 's|^(.*)"subnet": ".*"(.*)[,*]$|\1"subnet": "%s"\2|g' {}`, cidr), ";")) + if err != nil { + return fmt.Errorf("failed to configure non-podman bridge cni configs in %q: %v", DefaultConfDir, err) + } + configs := out.Stdout.String() + + // podman bridge config(s): + // could be eg, 87-podman-bridge.conflist or 87-podman.conflist + // ref: https://github.com/containers/podman/blob/main/cni/87-podman-bridge.conflist + ip, ipnet, err := net.ParseCIDR(cidr) + if err != nil || ip.To4() == nil { + return fmt.Errorf("cidr %q is not valid ipv4 address: %v", cidr, err) + } + gateway := ip.Mask(ipnet.Mask) + gateway[3]++ + out, err = r.RunCmd(exec.Command( + "sudo", "find", DefaultConfDir, "-maxdepth", "1", "-type", "f", "-name", "*podman*", "-not", "-name", "*.mk_disabled", "-printf", "%p, ", "-exec", "sh", "-c", + fmt.Sprintf(`sudo sed -i -r -e 's|^(.*)"subnet": ".*"(.*)$|\1"subnet": "%s"\2|g' -e 's|^(.*)"gateway": ".*"(.*)$|\1"gateway": "%s"\2|g' {}`, cidr, gateway), ";")) + if err != nil { + return fmt.Errorf("failed to configure podman bridge cni configs in %q: %v", DefaultConfDir, err) + } + configs += out.Stdout.String() + + configs = strings.Trim(configs, ", ") + if len(configs) == 0 { + klog.Infof("no active bridge cni configs found in %q - nothing to configure", DefaultConfDir) + return nil + } + klog.Infof("configured [%s] bridge cni config(s)", configs) return nil } diff --git a/pkg/minikube/cni/flannel.go b/pkg/minikube/cni/flannel.go index b368b1fa8a..c906dafd46 100644 --- a/pkg/minikube/cni/flannel.go +++ b/pkg/minikube/cni/flannel.go @@ -18,634 +18,25 @@ package cni import ( "bytes" - "fmt" + _ "embed" "os/exec" - "path/filepath" "text/template" - "github.com/blang/semver/v4" "github.com/pkg/errors" - "k8s.io/klog/v2" + "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" - "k8s.io/minikube/pkg/util" ) -// From https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -var flannelYaml = `---{{if .LegacyPodSecurityPolicy}} -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: psp.flannel.unprivileged - annotations: - seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default - seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default - 
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default - apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default -spec: - privileged: false - volumes: - - configMap - - secret - - emptyDir - - hostPath - allowedHostPaths: - - pathPrefix: "/etc/cni/net.d" - - pathPrefix: "/etc/kube-flannel" - - pathPrefix: "/run/flannel" - readOnlyRootFilesystem: false - # Users and groups - runAsUser: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - fsGroup: - rule: RunAsAny - # Privilege Escalation - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - # Capabilities - allowedCapabilities: ['NET_ADMIN'] - defaultAddCapabilities: [] - requiredDropCapabilities: [] - # Host namespaces - hostPID: false - hostIPC: false - hostNetwork: true - hostPorts: - - min: 0 - max: 65535 - # SELinux - seLinux: - # SELinux is unused in CaaSP - rule: 'RunAsAny'{{else}} -kind: Namespace -apiVersion: v1 -metadata: - name: kube-system - labels: - pod-security.kubernetes.io/enforce: privileged{{end}} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: flannel -rules:{{if .LegacyPodSecurityPolicy}} - - apiGroups: ['extensions'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: ['psp.flannel.unprivileged']{{end}} - - apiGroups: - - "" - resources: - - pods - verbs: - - get - - apiGroups: - - "" - resources: - - nodes - verbs: - - list - - watch - - apiGroups: - - "" - resources: - - nodes/status - verbs: - - patch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: flannel -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: flannel -subjects: -- kind: ServiceAccount - name: flannel - namespace: kube-system ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: flannel - namespace: kube-system ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: kube-flannel-cfg - namespace: kube-system - labels: - tier: node - app: flannel -data: - cni-conf.json: | - { - "name": "cbr0", - "cniVersion": "0.3.1", - "plugins": [ - { - "type": "flannel", - "delegate": { - "hairpinMode": true, - "isDefaultGateway": true - } - }, - { - "type": "portmap", - "capabilities": { - "portMappings": true - } - } - ] - } - net-conf.json: | - { - "Network": "10.244.0.0/16", - "Backend": { - "Type": "vxlan" - } - } ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: kube-flannel-ds-amd64 - namespace: kube-system - labels: - tier: node - app: flannel -spec: - selector: - matchLabels: - app: flannel - template: - metadata: - labels: - tier: node - app: flannel - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: In - values: - - linux - - key: kubernetes.io/arch - operator: In - values: - - amd64 - hostNetwork: true - tolerations: - - operator: Exists - effect: NoSchedule - serviceAccountName: flannel - initContainers: - - name: install-cni - image: quay.io/coreos/flannel:v0.12.0-amd64 - command: - - cp - args: - - -f - - /etc/kube-flannel/cni-conf.json - - /etc/cni/net.d/10-flannel.conflist - volumeMounts: - - name: cni - mountPath: /etc/cni/net.d - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - containers: - - name: kube-flannel - image: quay.io/coreos/flannel:v0.12.0-amd64 - command: - - /opt/bin/flanneld - args: - - --ip-masq - - --kube-subnet-mgr - resources: - requests: - cpu: "100m" - memory: "50Mi" - limits: - cpu: "100m" - memory: 
"50Mi" - securityContext: - privileged: false - capabilities: - add: ["NET_ADMIN"] - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - volumeMounts: - - name: run - mountPath: /run/flannel - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - volumes: - - name: run - hostPath: - path: /run/flannel - - name: cni - hostPath: - path: /etc/cni/net.d - - name: flannel-cfg - configMap: - name: kube-flannel-cfg ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: kube-flannel-ds-arm64 - namespace: kube-system - labels: - tier: node - app: flannel -spec: - selector: - matchLabels: - app: flannel - template: - metadata: - labels: - tier: node - app: flannel - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: In - values: - - linux - - key: kubernetes.io/arch - operator: In - values: - - arm64 - hostNetwork: true - tolerations: - - operator: Exists - effect: NoSchedule - serviceAccountName: flannel - initContainers: - - name: install-cni - image: quay.io/coreos/flannel:v0.12.0-arm64 - command: - - cp - args: - - -f - - /etc/kube-flannel/cni-conf.json - - /etc/cni/net.d/10-flannel.conflist - volumeMounts: - - name: cni - mountPath: /etc/cni/net.d - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - containers: - - name: kube-flannel - image: quay.io/coreos/flannel:v0.12.0-arm64 - command: - - /opt/bin/flanneld - args: - - --ip-masq - - --kube-subnet-mgr - resources: - requests: - cpu: "100m" - memory: "50Mi" - limits: - cpu: "100m" - memory: "50Mi" - securityContext: - privileged: false - capabilities: - add: ["NET_ADMIN"] - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - volumeMounts: - - name: run - mountPath: /run/flannel - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - volumes: - - name: run - hostPath: - path: /run/flannel - - name: cni - hostPath: - path: /etc/cni/net.d - - name: flannel-cfg - configMap: - name: kube-flannel-cfg ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: kube-flannel-ds-arm - namespace: kube-system - labels: - tier: node - app: flannel -spec: - selector: - matchLabels: - app: flannel - template: - metadata: - labels: - tier: node - app: flannel - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: In - values: - - linux - - key: kubernetes.io/arch - operator: In - values: - - arm - hostNetwork: true - tolerations: - - operator: Exists - effect: NoSchedule - serviceAccountName: flannel - initContainers: - - name: install-cni - image: quay.io/coreos/flannel:v0.12.0-arm - command: - - cp - args: - - -f - - /etc/kube-flannel/cni-conf.json - - /etc/cni/net.d/10-flannel.conflist - volumeMounts: - - name: cni - mountPath: /etc/cni/net.d - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - containers: - - name: kube-flannel - image: quay.io/coreos/flannel:v0.12.0-arm - command: - - /opt/bin/flanneld - args: - - --ip-masq - - --kube-subnet-mgr - resources: - requests: - cpu: "100m" - memory: "50Mi" - limits: - cpu: "100m" - memory: "50Mi" - securityContext: - privileged: false - capabilities: - add: ["NET_ADMIN"] - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: 
POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - volumeMounts: - - name: run - mountPath: /run/flannel - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - volumes: - - name: run - hostPath: - path: /run/flannel - - name: cni - hostPath: - path: /etc/cni/net.d - - name: flannel-cfg - configMap: - name: kube-flannel-cfg ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: kube-flannel-ds-ppc64le - namespace: kube-system - labels: - tier: node - app: flannel -spec: - selector: - matchLabels: - app: flannel - template: - metadata: - labels: - tier: node - app: flannel - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: In - values: - - linux - - key: kubernetes.io/arch - operator: In - values: - - ppc64le - hostNetwork: true - tolerations: - - operator: Exists - effect: NoSchedule - serviceAccountName: flannel - initContainers: - - name: install-cni - image: quay.io/coreos/flannel:v0.12.0-ppc64le - command: - - cp - args: - - -f - - /etc/kube-flannel/cni-conf.json - - /etc/cni/net.d/10-flannel.conflist - volumeMounts: - - name: cni - mountPath: /etc/cni/net.d - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - containers: - - name: kube-flannel - image: quay.io/coreos/flannel:v0.12.0-ppc64le - command: - - /opt/bin/flanneld - args: - - --ip-masq - - --kube-subnet-mgr - resources: - requests: - cpu: "100m" - memory: "50Mi" - limits: - cpu: "100m" - memory: "50Mi" - securityContext: - privileged: false - capabilities: - add: ["NET_ADMIN"] - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - volumeMounts: - - name: run - mountPath: /run/flannel - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - volumes: - - name: run - hostPath: - path: /run/flannel - - name: cni - hostPath: - path: /etc/cni/net.d - - name: flannel-cfg - configMap: - name: kube-flannel-cfg ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: kube-flannel-ds-s390x - namespace: kube-system - labels: - tier: node - app: flannel -spec: - selector: - matchLabels: - app: flannel - template: - metadata: - labels: - tier: node - app: flannel - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: In - values: - - linux - - key: kubernetes.io/arch - operator: In - values: - - s390x - hostNetwork: true - tolerations: - - operator: Exists - effect: NoSchedule - serviceAccountName: flannel - initContainers: - - name: install-cni - image: quay.io/coreos/flannel:v0.12.0-s390x - command: - - cp - args: - - -f - - /etc/kube-flannel/cni-conf.json - - /etc/cni/net.d/10-flannel.conflist - volumeMounts: - - name: cni - mountPath: /etc/cni/net.d - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - containers: - - name: kube-flannel - image: quay.io/coreos/flannel:v0.12.0-s390x - command: - - /opt/bin/flanneld - args: - - --ip-masq - - --kube-subnet-mgr - resources: - requests: - cpu: "100m" - memory: "50Mi" - limits: - cpu: "100m" - memory: "50Mi" - securityContext: - privileged: false - capabilities: - add: ["NET_ADMIN"] - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - volumeMounts: - - name: run - mountPath: /run/flannel - - name: flannel-cfg - 
mountPath: /etc/kube-flannel/ - volumes: - - name: run - hostPath: - path: /run/flannel - - name: cni - hostPath: - path: /etc/cni/net.d - - name: flannel-cfg - configMap: - name: kube-flannel-cfg -` +// ref: https://github.com/flannel-io/flannel#deploying-flannel-manually: "For Kubernetes v1.17+"; multi-arch support +//go:embed flannel.yaml +var flannelYaml string + +// https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml var flannelTmpl = template.Must(template.New("flannel").Parse(flannelYaml)) type flannelTmplStruct struct { - LegacyPodSecurityPolicy bool + PodCIDR string } // Flannel is the Flannel CNI manager @@ -666,36 +57,23 @@ func (c Flannel) Apply(r Runner) error { return errors.Wrap(err, "required 'portmap' CNI plug-in not found") } - if driver.IsKIC(c.cc.Driver) { - conflict := "/etc/cni/net.d/100-crio-bridge.conf" - - _, err := r.RunCmd(exec.Command("stat", conflict)) - if err != nil { - klog.Warningf("%s not found, skipping disable step: %v", conflict, err) - return nil - } - - _, err = r.RunCmd(exec.Command("sudo", "mv", conflict, filepath.Join(filepath.Dir(conflict), "DISABLED-"+filepath.Base(conflict)))) - if err != nil { - klog.Errorf("unable to disable %s: %v", conflict, err) - } - } - - k8sVersion, err := util.ParseKubernetesVersion(c.cc.KubernetesConfig.KubernetesVersion) + m, err := c.manifest() if err != nil { - return fmt.Errorf("failed to parse Kubernetes version: %v", err) + return errors.Wrap(err, "manifest") } + return applyManifest(c.cc, r, m) +} +// manifest returns a Kubernetes manifest for a CNI +func (c Flannel) manifest() (assets.CopyableFile, error) { input := &flannelTmplStruct{ - LegacyPodSecurityPolicy: k8sVersion.LT(semver.Version{Major: 1, Minor: 25}), + PodCIDR: DefaultPodCIDR, } - b := bytes.Buffer{} if err := flannelTmpl.Execute(&b, input); err != nil { - return err + return nil, err } - - return applyManifest(c.cc, r, manifestAsset(b.Bytes())) + return manifestAsset(b.Bytes()), nil } // CIDR returns the default CIDR used by this CNI diff --git a/pkg/minikube/cni/flannel.yaml b/pkg/minikube/cni/flannel.yaml new file mode 100644 index 0000000000..378def674b --- /dev/null +++ b/pkg/minikube/cni/flannel.yaml @@ -0,0 +1,212 @@ +--- +kind: Namespace +apiVersion: v1 +metadata: + name: kube-flannel + labels: + pod-security.kubernetes.io/enforce: privileged +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +- apiGroups: + - "networking.k8s.io" + resources: + - clustercidrs + verbs: + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: flannel + namespace: kube-flannel +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: flannel + namespace: kube-flannel +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-flannel-cfg + namespace: kube-flannel + labels: + tier: node + app: flannel +data: + cni-conf.json: | + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + 
net-conf.json: | + { + "Network": "{{ .PodCIDR }}", + "Backend": { + "Type": "vxlan" + } + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds + namespace: kube-flannel + labels: + tier: node + app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + hostNetwork: true + priorityClassName: system-node-critical + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni-plugin + #image: flannelcni/flannel-cni-plugin:v1.1.2 #for ppc64le and mips64le (dockerhub limitations may apply) + image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.2 + command: + - cp + args: + - -f + - /flannel + - /opt/cni/bin/flannel + volumeMounts: + - name: cni-plugin + mountPath: /opt/cni/bin + - name: install-cni + #image: flannelcni/flannel:v0.20.2 #for ppc64le and mips64le (dockerhub limitations may apply) + image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2 + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + #image: flannelcni/flannel:v0.20.2 #for ppc64le and mips64le (dockerhub limitations may apply) + image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2 + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN", "NET_RAW"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: EVENT_QUEUE_DEPTH + value: "5000" + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + - name: xtables-lock + mountPath: /run/xtables.lock + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni-plugin + hostPath: + path: /opt/cni/bin + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate diff --git a/pkg/minikube/cni/kindnet.go b/pkg/minikube/cni/kindnet.go index d25d4b6135..fa30e14430 100644 --- a/pkg/minikube/cni/kindnet.go +++ b/pkg/minikube/cni/kindnet.go @@ -166,7 +166,7 @@ func (c KindNet) manifest() (assets.CopyableFile, error) { DefaultRoute: "0.0.0.0/0", // assumes IPv4 PodCIDR: DefaultPodCIDR, ImageName: images.KindNet(c.cc.KubernetesConfig.ImageRepository), - CNIConfDir: ConfDir, + CNIConfDir: DefaultConfDir, } b := bytes.Buffer{} diff --git a/pkg/minikube/constants/constants.go b/pkg/minikube/constants/constants.go index ff5d33da5f..ec614208d6 100644 --- a/pkg/minikube/constants/constants.go +++ b/pkg/minikube/constants/constants.go @@ -54,6 +54,7 @@ const ( SSHPort = 22 // RegistryAddonPort os the default registry addon port RegistryAddonPort = 5000 + // Containerd is the default name and spelling for the containerd container runtime Containerd = "containerd" // CRIO is the default name and spelling for the cri-o container runtime @@ 
-63,6 +64,12 @@ const ( // DefaultContainerRuntime is our default container runtime DefaultContainerRuntime = "" + // cgroup drivers + DefaultCgroupDriver = "systemd" + CgroupfsCgroupDriver = "cgroupfs" + SystemdCgroupDriver = "systemd" + UnknownCgroupDriver = "" + // APIServerName is the default API server name APIServerName = "minikubeCA" // ClusterDNSDomain is the default DNS domain diff --git a/pkg/minikube/cruntime/containerd.go b/pkg/minikube/cruntime/containerd.go index b9167b6147..e9ab39f684 100644 --- a/pkg/minikube/cruntime/containerd.go +++ b/pkg/minikube/cruntime/containerd.go @@ -26,6 +26,7 @@ import ( "os" "os/exec" "path" + "runtime" "strings" "time" @@ -37,6 +38,7 @@ import ( "k8s.io/minikube/pkg/minikube/cni" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/minikube/sysinit" @@ -127,18 +129,51 @@ func (r *Containerd) Available() error { } // generateContainerdConfig sets up /etc/containerd/config.toml & /etc/containerd/containerd.conf.d/02-containerd.conf -func generateContainerdConfig(cr CommandRunner, imageRepository string, kv semver.Version, forceSystemd bool, insecureRegistry []string, inUserNamespace bool) error { +func generateContainerdConfig(cr CommandRunner, imageRepository string, kv semver.Version, cgroupDriver string, insecureRegistry []string, inUserNamespace bool) error { pauseImage := images.Pause(kv, imageRepository) - if _, err := cr.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo sed -e 's|^.*sandbox_image = .*$|sandbox_image = \"%s\"|' -i %s", pauseImage, containerdConfigFile))); err != nil { + if _, err := cr.RunCmd(exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = %q|' %s`, pauseImage, containerdConfigFile))); err != nil { return errors.Wrap(err, "update sandbox_image") } - if _, err := cr.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo sed -e 's|^.*restrict_oom_score_adj = .*$|restrict_oom_score_adj = %t|' -i %s", inUserNamespace, containerdConfigFile))); err != nil { + if _, err := cr.RunCmd(exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = %t|' %s`, inUserNamespace, containerdConfigFile))); err != nil { return errors.Wrap(err, "update restrict_oom_score_adj") } - if _, err := cr.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo sed -e 's|^.*SystemdCgroup = .*$|SystemdCgroup = %t|' -i %s", forceSystemd, containerdConfigFile))); err != nil { - return errors.Wrap(err, "update SystemdCgroup") + + // configure cgroup driver + if cgroupDriver == constants.UnknownCgroupDriver { + klog.Warningf("unable to configure containerd to use unknown cgroup driver, will use default %q instead", constants.DefaultCgroupDriver) + cgroupDriver = constants.DefaultCgroupDriver } - if _, err := cr.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo sed -e 's|^.*conf_dir = .*$|conf_dir = \"%s\"|' -i %s", cni.ConfDir, containerdConfigFile))); err != nil { + klog.Infof("configuring containerd to use %q as cgroup driver...", cgroupDriver) + useSystemd := cgroupDriver == constants.SystemdCgroupDriver + if _, err := cr.RunCmd(exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = %t|g' %s`, useSystemd, containerdConfigFile))); err != nil { + return errors.Wrap(err, "configuring SystemdCgroup") + } + + // handle 
deprecated/removed features + // ref: https://github.com/containerd/containerd/blob/main/RELEASES.md#deprecated-features + if _, err := cr.RunCmd(exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' %s`, containerdConfigFile))); err != nil { + return errors.Wrap(err, "configuring io.containerd.runtime version") + } + + // avoid containerd v1.6.14+ "failed to load plugin io.containerd.grpc.v1.cri" error="invalid plugin config: `systemd_cgroup` only works for runtime io.containerd.runtime.v1.linux" error + // that then leads to crictl "getting the runtime version: rpc error: code = Unimplemented desc = unknown service runtime.v1alpha2.RuntimeService" error + // ref: https://github.com/containerd/containerd/issues/4203 + if _, err := cr.RunCmd(exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i '/systemd_cgroup/d' %s`, containerdConfigFile))); err != nil { + return errors.Wrap(err, "removing deprecated systemd_cgroup param") + } + + // "runtime_type" has to be specified and it should be "io.containerd.runc.v2" + // ref: https://github.com/containerd/containerd/issues/6964#issuecomment-1132378279 + if _, err := cr.RunCmd(exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' %s`, containerdConfigFile))); err != nil { + return errors.Wrap(err, "configuring io.containerd.runc version") + } + + // ensure conf_dir is using '/etc/cni/net.d' + // we might still want to try removing '/etc/cni/net.mk' in case of an upgrade from a previous minikube version that had/used it + if _, err := cr.RunCmd(exec.Command("sh", "-c", `sudo rm -rf /etc/cni/net.mk`)); err != nil { + klog.Warningf("unable to remove /etc/cni/net.mk directory: %v", err) + } + if _, err := cr.RunCmd(exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = %q|g' %s`, cni.DefaultConfDir, containerdConfigFile))); err != nil { return errors.Wrap(err, "update conf_dir") } @@ -175,7 +210,8 @@ func generateContainerdConfig(cr CommandRunner, imageRepository string, kv semve } // Enable idempotently enables containerd on a host -func (r *Containerd) Enable(disOthers, forceSystemd, inUserNamespace bool) error { +// It is also called by docker.Enable() - if docker is bound to containerd - to enforce proper containerd configuration via a service restart.
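All of the generateContainerdConfig edits above go through sed with an indentation-preserving capture group. Purely as an illustration of what that expression does - minikube shells out to sed on the node; this in-memory Go equivalent is not part of the patch:

package main

import (
	"fmt"
	"regexp"
)

// setSystemdCgroup mirrors the sed expression used above: rewrite every
// "SystemdCgroup = ..." line in config.toml while preserving its indentation.
func setSystemdCgroup(toml string, useSystemd bool) string {
	re := regexp.MustCompile(`(?m)^( *)SystemdCgroup = .*$`)
	return re.ReplaceAllString(toml, fmt.Sprintf("${1}SystemdCgroup = %t", useSystemd))
}

func main() {
	in := `        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
          SystemdCgroup = false
`
	fmt.Print(setSystemdCgroup(in, true))
}

The capture group keeps the TOML indentation intact, which is why the PR switches away from the older anchored 's|^.*SystemdCgroup = .*$|...|' form that flattened it.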
+func (r *Containerd) Enable(disOthers bool, cgroupDriver string, inUserNamespace bool) error { if inUserNamespace { if err := CheckKernelCompatibility(r.Runner, 5, 11); err != nil { // For using overlayfs @@ -194,13 +230,26 @@ func (r *Containerd) Enable(disOthers, forceSystemd, inUserNamespace bool) error if err := populateCRIConfig(r.Runner, r.SocketPath()); err != nil { return err } - if err := generateContainerdConfig(r.Runner, r.ImageRepository, r.KubernetesVersion, forceSystemd, r.InsecureRegistry, inUserNamespace); err != nil { + + if err := generateContainerdConfig(r.Runner, r.ImageRepository, r.KubernetesVersion, cgroupDriver, r.InsecureRegistry, inUserNamespace); err != nil { return err } if err := enableIPForwarding(r.Runner); err != nil { return err } + // TODO (@prezha): remove this hack after proper version update in minikube release + // ref: https://github.com/containerd/containerd/blob/main/RELEASES.md#kubernetes-support + targetVersion := "1.6.15" + currentVersion, err := r.Version() + if err == nil && semver.MustParse(targetVersion).GT(semver.MustParse(currentVersion)) { + klog.Infof("replacing original containerd with v%s-linux-%s", targetVersion, runtime.GOARCH) + _ = r.Init.ForceStop("containerd") + if err := updateContainerdBinary(r.Runner, targetVersion, runtime.GOARCH); err != nil { + klog.Warningf("unable to replace original containerd with v%s-linux-%s: %v", targetVersion, runtime.GOARCH, err) + } + } + // Otherwise, containerd will fail API requests with 'Unimplemented' return r.Init.Restart("containerd") } @@ -387,21 +436,41 @@ func (r *Containerd) CGroupDriver() (string, error) { if err != nil { return "", err } - if info["config"] == nil { - return "", errors.Wrapf(err, "missing config") + + // crictl also returns the default ('false') value for "systemdCgroup" - the deprecated "systemd_cgroup" config param that is now irrelevant + // ref: https://github.com/containerd/containerd/blob/5e7baa2eb3dab4c4365dd63c05ed8b3fa94b9271/pkg/cri/config/config.go#L277-L280 + // ref: https://github.com/containerd/containerd/issues/4574#issuecomment-1298727099 + // so, we try to extract runc's "SystemdCgroup" option that we care about + // ref: https://github.com/containerd/containerd/issues/4203#issuecomment-651532765 + j, err := json.Marshal(info) + if err != nil { + return "", fmt.Errorf("marshalling: %v", err) } - config, ok := info["config"].(map[string]interface{}) - if !ok { - return "", errors.Wrapf(err, "config not map") + s := struct { + Config struct { + Containerd struct { + Runtimes struct { + Runc struct { + Options struct { + SystemdCgroup bool `json:"SystemdCgroup"` + } `json:"options"` + } `json:"runc"` + } `json:"runtimes"` + } `json:"containerd"` + } `json:"config"` + }{} + if err := json.Unmarshal(j, &s); err != nil { + return "", fmt.Errorf("unmarshalling: %v", err) } - cgroupManager := "cgroupfs" // default - switch config["systemdCgroup"] { - case false: - cgroupManager = "cgroupfs" + // note: if "path" does not exist, SystemdCgroup will evaluate to false as the 'default' value for bool => constants.CgroupfsCgroupDriver + switch s.Config.Containerd.Runtimes.Runc.Options.SystemdCgroup { case true: - cgroupManager = "systemd" + return constants.SystemdCgroupDriver, nil + case false: + return constants.CgroupfsCgroupDriver, nil + default: + return constants.DefaultCgroupDriver, nil } - return cgroupManager, nil } // KubeletOptions returns kubelet options for a containerd @@ -410,7 +479,6 @@ func (r *Containerd) KubeletOptions() map[string]string {
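The CGroupDriver rewrite above round-trips the crictl info map through encoding/json so the nested runc option can be read through a typed struct. A cut-down, runnable illustration with a hand-written fragment of that JSON (the shape is an assumption mirroring the anonymous struct in the hunk, not captured crictl output):

package main

import (
	"encoding/json"
	"fmt"
)

// criInfo mirrors the anonymous struct above: only the path down to
// runc's SystemdCgroup option is declared; everything else is ignored.
type criInfo struct {
	Config struct {
		Containerd struct {
			Runtimes struct {
				Runc struct {
					Options struct {
						SystemdCgroup bool `json:"SystemdCgroup"`
					} `json:"options"`
				} `json:"runc"`
			} `json:"runtimes"`
		} `json:"containerd"`
	} `json:"config"`
}

func main() {
	raw := []byte(`{"config":{"containerd":{"runtimes":{"runc":{"options":{"SystemdCgroup":true}}}}}}`)
	var s criInfo
	if err := json.Unmarshal(raw, &s); err != nil {
		panic(err)
	}
	if s.Config.Containerd.Runtimes.Runc.Options.SystemdCgroup {
		fmt.Println("systemd")
	} else {
		fmt.Println("cgroupfs")
	}
}

If the path is absent from the JSON, the bool simply stays false, which is why the code treats cgroupfs as the effective default.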
"container-runtime": "remote", "container-runtime-endpoint": fmt.Sprintf("unix://%s", r.SocketPath()), "image-service-endpoint": fmt.Sprintf("unix://%s", r.SocketPath()), - "runtime-request-timeout": "15m", } } @@ -500,7 +568,7 @@ func (r *Containerd) Preload(cc config.ClusterConfig) error { if rr, err := r.Runner.RunCmd(exec.Command("sudo", "tar", "-I", "lz4", "-C", "/var", "-xf", dest)); err != nil { return errors.Wrapf(err, "extracting tarball: %s", rr.Output()) } - klog.Infof("Took %f seconds t extract the tarball", time.Since(t).Seconds()) + klog.Infof("Took %f seconds to extract the tarball", time.Since(t).Seconds()) // remove the tarball in the VM if err := r.Runner.Remove(fa); err != nil { @@ -510,7 +578,7 @@ func (r *Containerd) Preload(cc config.ClusterConfig) error { return r.Restart() } -// Restart restarts Docker on a host +// Restart restarts this container runtime on a host func (r *Containerd) Restart() error { return r.Init.Restart("containerd") } diff --git a/pkg/minikube/cruntime/cri.go b/pkg/minikube/cruntime/cri.go index f1f3c11ccc..bfa4964f2a 100644 --- a/pkg/minikube/cruntime/cri.go +++ b/pkg/minikube/cruntime/cri.go @@ -190,7 +190,7 @@ func killCRIContainers(cr CommandRunner, ids []string) error { klog.Infof("Killing containers: %s", ids) crictl := getCrictlPath(cr) - args := append([]string{crictl, "rm"}, ids...) + args := append([]string{crictl, "rm", "--force"}, ids...) c := exec.Command("sudo", args...) if _, err := cr.RunCmd(c); err != nil { return errors.Wrap(err, "crictl") @@ -232,7 +232,11 @@ func stopCRIContainers(cr CommandRunner, ids []string) error { klog.Infof("Stopping containers: %s", ids) crictl := getCrictlPath(cr) - args := append([]string{crictl, "stop"}, ids...) + // bring crictl stop timeout on par with docker: + // - docker stop --help => -t, --time int Seconds to wait for stop before killing it (default 10) + // - crictl stop --help => --timeout value, -t value Seconds to wait to kill the container after a graceful stop is requested (default: 0) + // to prevent "stuck" containers blocking ports (eg, "[ERROR Port-2379|2380]: Port 2379|2380 is in use" for etcd during "hot" k8s upgrade) + args := append([]string{crictl, "stop", "--timeout=10"}, ids...) c := exec.Command("sudo", args...) 
if _, err := cr.RunCmd(c); err != nil { return errors.Wrap(err, "crictl") diff --git a/pkg/minikube/cruntime/crio.go b/pkg/minikube/cruntime/crio.go index 86af57ac97..0b38b200cd 100644 --- a/pkg/minikube/cruntime/crio.go +++ b/pkg/minikube/cruntime/crio.go @@ -31,16 +31,16 @@ import ( "k8s.io/klog/v2" "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/bootstrapper/images" - "k8s.io/minikube/pkg/minikube/cni" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/minikube/sysinit" ) const ( - // CRIOConfFile is the path to the CRI-O configuration + // crioConfigFile is the path to the CRI-O configuration crioConfigFile = "/etc/crio/crio.conf.d/02-crio.conf" ) @@ -53,29 +53,40 @@ type CRIO struct { Init sysinit.Manager } -// generateCRIOConfig sets up /etc/crio/crio.conf -func generateCRIOConfig(cr CommandRunner, imageRepository string, kv semver.Version) error { +// generateCRIOConfig sets up pause image and cgroup manager for cri-o in crioConfigFile +func generateCRIOConfig(cr CommandRunner, imageRepository string, kv semver.Version, cgroupDriver string) error { pauseImage := images.Pause(kv, imageRepository) - - c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo sed -e 's|^.*pause_image = .*$|pause_image = \"%s\"|' -i %s", pauseImage, crioConfigFile)) + klog.Infof("configure cri-o to use %q pause image...", pauseImage) + c := exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i 's|^.*pause_image = .*$|pause_image = %q|' %s`, pauseImage, crioConfigFile)) if _, err := cr.RunCmd(c); err != nil { - return errors.Wrap(err, "generateCRIOConfig") + return errors.Wrap(err, "update pause_image") } - if cni.Network != "" { - klog.Infof("Updating CRIO to use the custom CNI network %q", cni.Network) - if _, err := cr.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo sed -e 's|^.*cni_default_network = .*$|cni_default_network = \"%s\"|' -i %s", cni.Network, crioConfigFile))); err != nil { - return errors.Wrap(err, "update network_dir") - } + // configure cgroup driver + if cgroupDriver == constants.UnknownCgroupDriver { + klog.Warningf("unable to configure cri-o to use unknown cgroup driver, will use default %q instead", constants.DefaultCgroupDriver) + cgroupDriver = constants.DefaultCgroupDriver + } + klog.Infof("configuring cri-o to use %q as cgroup driver...", cgroupDriver) + if _, err := cr.RunCmd(exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i 's|^.*cgroup_manager = .*$|cgroup_manager = %q|' %s`, cgroupDriver, crioConfigFile))); err != nil { + return errors.Wrap(err, "configuring cgroup_manager") + } + // explicitly set conmon_cgroup to avoid errors like: + // - level=fatal msg="Validating runtime config: conmon cgroup should be 'pod' or a systemd slice" + // - level=fatal msg="Validating runtime config: cgroupfs manager conmon cgroup should be 'pod' or empty" + // ref: https://github.com/cri-o/cri-o/pull/3940 + // ref: https://github.com/cri-o/cri-o/issues/6047 + // ref: https://kubernetes.io/docs/setup/production-environment/container-runtimes/#cgroup-driver + if _, err := cr.RunCmd(exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i '/conmon_cgroup = .*/d' %s`, crioConfigFile))); err != nil { + return errors.Wrap(err, "removing conmon_cgroup") + } + if _, err := cr.RunCmd(exec.Command("sh", "-c", fmt.Sprintf(`sudo sed -i '/cgroup_manager = .*/a conmon_cgroup = %q' %s`, "pod", crioConfigFile))); err != nil { + 
return errors.Wrap(err, "configuring conmon_cgroup") } - return nil -} - -func (r *CRIO) forceSystemd() error { - c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo sed -e 's|^.*cgroup_manager = .*$|cgroup_manager = \"systemd\"|' -i %s", crioConfigFile)) - if _, err := r.Runner.RunCmd(c); err != nil { - return errors.Wrap(err, "force systemd") + // we might still want to try removing '/etc/cni/net.mk' in case of upgrade from previous minikube version that had/used it + if _, err := cr.RunCmd(exec.Command("sh", "-c", `sudo rm -rf /etc/cni/net.mk`)); err != nil { + klog.Warningf("unable to remove /etc/cni/net.mk directory: %v", err) } return nil @@ -185,7 +196,7 @@ Environment="_CRIO_ROOTLESS=1" } // Enable idempotently enables CRIO on a host -func (r *CRIO) Enable(disOthers, forceSystemd, inUserNamespace bool) error { +func (r *CRIO) Enable(disOthers bool, cgroupDriver string, inUserNamespace bool) error { if disOthers { if err := disableOthers(r, r.Runner); err != nil { klog.Warningf("disableOthers: %v", err) @@ -194,17 +205,12 @@ func (r *CRIO) Enable(disOthers, forceSystemd, inUserNamespace bool) error { if err := populateCRIConfig(r.Runner, r.SocketPath()); err != nil { return err } - if err := generateCRIOConfig(r.Runner, r.ImageRepository, r.KubernetesVersion); err != nil { + if err := generateCRIOConfig(r.Runner, r.ImageRepository, r.KubernetesVersion, cgroupDriver); err != nil { return err } if err := enableIPForwarding(r.Runner); err != nil { return err } - if forceSystemd { - if err := r.forceSystemd(); err != nil { - return err - } - } if inUserNamespace { if err := CheckKernelCompatibility(r.Runner, 5, 11); err != nil { // For using overlayfs @@ -219,7 +225,7 @@ func (r *CRIO) Enable(disOthers, forceSystemd, inUserNamespace bool) error { } } // NOTE: before we start crio explicitly here, crio might be already started automatically - return r.Init.Start("crio") + return r.Init.Restart("crio") } // Disable idempotently disables CRIO on a host @@ -356,7 +362,6 @@ func (r *CRIO) KubeletOptions() map[string]string { "container-runtime": "remote", "container-runtime-endpoint": r.SocketPath(), "image-service-endpoint": r.SocketPath(), - "runtime-request-timeout": "15m", } } @@ -446,7 +451,7 @@ func (r *CRIO) Preload(cc config.ClusterConfig) error { if rr, err := r.Runner.RunCmd(exec.Command("sudo", "tar", "-I", "lz4", "-C", "/var", "-xf", dest)); err != nil { return errors.Wrapf(err, "extracting tarball: %s", rr.Output()) } - klog.Infof("Took %f seconds t extract the tarball", time.Since(t).Seconds()) + klog.Infof("Took %f seconds to extract the tarball", time.Since(t).Seconds()) // remove the tarball in the VM if err := r.Runner.Remove(fa); err != nil { diff --git a/pkg/minikube/cruntime/cruntime.go b/pkg/minikube/cruntime/cruntime.go index 4b4017a71e..0cfee9be2b 100644 --- a/pkg/minikube/cruntime/cruntime.go +++ b/pkg/minikube/cruntime/cruntime.go @@ -81,7 +81,7 @@ type Manager interface { // Version retrieves the current version of this runtime Version() (string, error) // Enable idempotently enables this runtime on a host - Enable(bool, bool, bool) error + Enable(bool, string, bool) error // Disable idempotently disables this runtime on a host Disable() error // Active returns whether or not a runtime is active on a host @@ -345,9 +345,35 @@ func ConfigureNetworkPlugin(r Manager, cr CommandRunner, networkPlugin string) e } return nil } - dm, ok := r.(*Docker) - if !ok { - return fmt.Errorf("name and type mismatch") - } - return dockerConfigureNetworkPlugin(*dm, cr, 
networkPlugin) + return dockerConfigureNetworkPlugin(cr, networkPlugin) +} + +// updateCRIDockerdBinary updates cri-dockerd to the given version +func updateCRIDockerdBinary(cr CommandRunner, version, arch string) error { + curl := fmt.Sprintf("curl -sSfL https://github.com/Mirantis/cri-dockerd/releases/download/v%s/cri-dockerd-%s.%s.tgz | tar -xz -C /tmp", version, version, arch) + if _, err := cr.RunCmd(exec.Command("sudo", "sh", "-c", curl)); err != nil { + return fmt.Errorf("unable to download cri-dockerd version %s: %v", version, err) + } + if _, err := cr.RunCmd(exec.Command("sudo", "chmod", "a+x", "/tmp/cri-dockerd/cri-dockerd")); err != nil { + return fmt.Errorf("unable to chmod cri-dockerd version %s: %v", version, err) + } + if _, err := cr.RunCmd(exec.Command("sudo", "mv", "/tmp/cri-dockerd/cri-dockerd", "/usr/bin/cri-dockerd")); err != nil { + return fmt.Errorf("unable to install cri-dockerd version %s: %v", version, err) + } + return nil +} + +// updateContainerdBinary updates containerd to the given version +func updateContainerdBinary(cr CommandRunner, version, arch string) error { + curl := fmt.Sprintf("curl -sSfL https://github.com/containerd/containerd/releases/download/v%s/containerd-%s-linux-%s.tar.gz | tar -xz -C /tmp", version, version, arch) + if _, err := cr.RunCmd(exec.Command("sudo", "sh", "-c", curl)); err != nil { + return fmt.Errorf("unable to download containerd version %s: %v", version, err) + } + if _, err := cr.RunCmd(exec.Command("sudo", "sh", "-c", "chmod a+x /tmp/bin/*")); err != nil { // note: has to run in subshell because of wildcard! + return fmt.Errorf("unable to chmod containerd version %s: %v", version, err) + } + if _, err := cr.RunCmd(exec.Command("sudo", "sh", "-c", "mv /tmp/bin/* /usr/bin/")); err != nil { // note: has to run in subshell because of wildcard!
+ return fmt.Errorf("unable to install containerd version %s: %v", version, err) + } + return nil } diff --git a/pkg/minikube/cruntime/cruntime_test.go b/pkg/minikube/cruntime/cruntime_test.go index c5d12c024a..1348060390 100644 --- a/pkg/minikube/cruntime/cruntime_test.go +++ b/pkg/minikube/cruntime/cruntime_test.go @@ -30,6 +30,7 @@ import ( "k8s.io/klog/v2" "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/command" + "k8s.io/minikube/pkg/minikube/constants" ) func TestName(t *testing.T) { @@ -154,13 +155,11 @@ func TestKubeletOptions(t *testing.T) { "container-runtime": "remote", "container-runtime-endpoint": "/var/run/crio/crio.sock", "image-service-endpoint": "/var/run/crio/crio.sock", - "runtime-request-timeout": "15m", }}, {"containerd", map[string]string{ "container-runtime": "remote", "container-runtime-endpoint": "unix:///run/containerd/containerd.sock", "image-service-endpoint": "unix:///run/containerd/containerd.sock", - "runtime-request-timeout": "15m", }}, } for _, tc := range tests { @@ -461,7 +460,7 @@ func (f *FakeRunner) crictl(args []string, _ bool) (string, error) { return strings.Join(ids, "\n"), nil } case "stop": - for _, id := range args[1:] { + for _, id := range args[2:] { f.t.Logf("fake crictl: Stopping id %q", id) if f.containers[id] == "" { return "", fmt.Errorf("no such container") @@ -469,7 +468,7 @@ func (f *FakeRunner) crictl(args []string, _ bool) (string, error) { delete(f.containers, id) } case "rm": - for _, id := range args[1:] { + for _, id := range args[2:] { f.t.Logf("fake crictl: Removing id %q", id) if f.containers[id] == "" { return "", fmt.Errorf("no such container") @@ -630,7 +629,7 @@ func TestDisable(t *testing.T) { runtime string want []string }{ - {"docker", []string{"sudo", "systemctl", "stop", "-f", "docker.socket", "sudo", "systemctl", "stop", "-f", "docker.service", + {"docker", []string{"sudo", "systemctl", "stop", "-f", "cri-docker.socket", "sudo", "systemctl", "stop", "-f", "cri-docker.service", "sudo", "systemctl", "disable", "cri-docker.socket", "sudo", "systemctl", "mask", "cri-docker.service", "sudo", "systemctl", "stop", "-f", "docker.socket", "sudo", "systemctl", "stop", "-f", "docker.service", "sudo", "systemctl", "disable", "docker.socket", "sudo", "systemctl", "mask", "docker.service"}}, {"crio", []string{"sudo", "systemctl", "stop", "-f", "crio"}}, {"containerd", []string{"sudo", "systemctl", "stop", "-f", "containerd"}}, @@ -687,7 +686,7 @@ func TestEnable(t *testing.T) { map[string]serviceState{ "docker": SvcExited, "containerd": SvcExited, - "crio": SvcRunning, + "crio": SvcRestarted, "crio-shutdown": SvcExited, }}, } @@ -701,7 +700,7 @@ func TestEnable(t *testing.T) { if err != nil { t.Fatalf("New(%s): %v", tc.runtime, err) } - err = cr.Enable(true, false, false) + err = cr.Enable(true, constants.CgroupfsCgroupDriver, false) if err != nil { t.Errorf("%s disable unexpected error: %v", tc.runtime, err) } diff --git a/pkg/minikube/cruntime/docker.go b/pkg/minikube/cruntime/docker.go index 0fe4e3f955..0695ba4fd5 100644 --- a/pkg/minikube/cruntime/docker.go +++ b/pkg/minikube/cruntime/docker.go @@ -23,6 +23,7 @@ import ( "os" "os/exec" "path" + "runtime" "strings" "text/template" "time" @@ -33,9 +34,9 @@ import ( "k8s.io/klog/v2" "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/bootstrapper/images" - "k8s.io/minikube/pkg/minikube/cni" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" 
"k8s.io/minikube/pkg/minikube/docker" "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/image" @@ -126,7 +127,7 @@ func (r *Docker) Active() bool { } // Enable idempotently enables Docker on a host -func (r *Docker) Enable(disOthers, forceSystemd, inUserNamespace bool) error { +func (r *Docker) Enable(disOthers bool, cgroupDriver string, inUserNamespace bool) error { if inUserNamespace { return errors.New("inUserNamespace must not be true for docker") } @@ -149,10 +150,8 @@ func (r *Docker) Enable(disOthers, forceSystemd, inUserNamespace bool) error { klog.ErrorS(err, "Failed to enable", "service", "docker.socket") } - if forceSystemd { - if err := r.forceSystemd(); err != nil { - return err - } + if err := r.setCGroup(cgroupDriver); err != nil { + return err } if err := r.Init.Restart("docker"); err != nil { @@ -160,10 +159,26 @@ func (r *Docker) Enable(disOthers, forceSystemd, inUserNamespace bool) error { } if r.CRIService != "" { + // TODO (@prezha): remove this hack after proper version update in minikube release + // deploy/iso/minikube-iso/arch/x86_64/package/cri-dockerd/cri-dockerd.* + // deploy/iso/minikube-iso/arch/aarch64/package/cri-dockerd-aarch64/cri-dockerd.* + // note: https://github.com/Mirantis/cri-dockerd/blob/master/Makefile changed => also needs updating .mk files?! + targetVersion := "0.3.0" + klog.Infof("replacing original cri-dockerd with v%s-%s", targetVersion, runtime.GOARCH) + if err := updateCRIDockerdBinary(r.Runner, targetVersion, runtime.GOARCH); err != nil { + klog.Warningf("unable to replace original cri-dockerd with v%s-%s: %v", targetVersion, runtime.GOARCH, err) + } + + if err := r.Init.Enable("cri-docker.socket"); err != nil { + return err + } + if err := r.Init.Unmask(r.CRIService); err != nil { + return err + } if err := r.Init.Enable(r.CRIService); err != nil { return err } - if err := r.Init.Start(r.CRIService); err != nil { + if err := r.Init.Restart(r.CRIService); err != nil { return err } } @@ -178,25 +193,34 @@ func (r *Docker) Restart() error { // Disable idempotently disables Docker on a host func (r *Docker) Disable() error { - if r.CRIService != "" { - if err := r.Init.Stop(r.CRIService); err != nil { - return err - } - if err := r.Init.Disable(r.CRIService); err != nil { - return err - } + // even if r.CRIService is undefined, it might still be available, so try to disable it and just warn then fallthrough if unsuccessful + klog.Info("disabling cri-docker service (if available) ...") + criSocket := "cri-docker.socket" + criService := "cri-docker.service" + if err := r.Init.ForceStop(criSocket); err != nil { + klog.Warningf("Failed to stop socket %q (might be ok): %v", criSocket, err) } + if err := r.Init.ForceStop(criService); err != nil { + klog.Warningf("Failed to stop service %q (might be ok): %v", criService, err) + } + if err := r.Init.Disable(criSocket); err != nil { + klog.Warningf("Failed to disable socket %q (might be ok): %v", criSocket, err) + } + if err := r.Init.Mask(criService); err != nil { + klog.Warningf("Failed to mask service %q (might be ok): %v", criService, err) + } + klog.Info("disabling docker service ...") // because #10373 if err := r.Init.ForceStop("docker.socket"); err != nil { - klog.ErrorS(err, "Failed to stop", "service", "docker.socket") + klog.ErrorS(err, "Failed to stop", "socket", "docker.socket") } if err := r.Init.ForceStop("docker.service"); err != nil { klog.ErrorS(err, "Failed to stop", "service", "docker.service") return err } if err := r.Init.Disable("docker.socket"); err != nil 
{ - klog.ErrorS(err, "Failed to disable", "service", "docker.socket") + klog.ErrorS(err, "Failed to disable", "socket", "docker.socket") } return r.Init.Mask("docker.service") } @@ -373,7 +397,6 @@ func (r *Docker) KubeletOptions() map[string]string { "container-runtime": "remote", "container-runtime-endpoint": r.SocketPath(), "image-service-endpoint": r.SocketPath(), - "runtime-request-timeout": "15m", } } return map[string]string{ @@ -507,18 +530,23 @@ func (r *Docker) SystemLogCmd(len int) string { return fmt.Sprintf("sudo journalctl -u docker -n %d", len) } -// ForceSystemd forces the docker daemon to use systemd as cgroup manager -func (r *Docker) forceSystemd() error { - klog.Infof("Forcing docker to use systemd as cgroup manager...") - daemonConfig := `{ -"exec-opts": ["native.cgroupdriver=systemd"], +// setCGroup configures the docker daemon to use driver as cgroup manager +// ref: https://docs.docker.com/engine/reference/commandline/dockerd/#options-for-the-runtime +func (r *Docker) setCGroup(driver string) error { + if driver == constants.UnknownCgroupDriver { + return fmt.Errorf("unable to configure docker to use unknown cgroup driver") + } + + klog.Infof("configuring docker to use %q as cgroup driver...", driver) + daemonConfig := fmt.Sprintf(`{ +"exec-opts": ["native.cgroupdriver=%s"], "log-driver": "json-file", "log-opts": { "max-size": "100m" }, "storage-driver": "overlay2" } -` +`, driver) ma := assets.NewMemoryAsset([]byte(daemonConfig), "/etc/docker", "daemon.json", "0644") return r.Runner.Copy(ma) } @@ -668,24 +696,22 @@ func (r *Docker) ImagesPreloaded(images []string) bool { const ( CNIBinDir = "/opt/cni/bin" - CNIConfDir = "/etc/cni/net.d" CNICacheDir = "/var/lib/cni/cache" ) -func dockerConfigureNetworkPlugin(r Docker, cr CommandRunner, networkPlugin string) error { +func dockerConfigureNetworkPlugin(cr CommandRunner, networkPlugin string) error { + // $ cri-dockerd --version + // cri-dockerd 0.2.6 (d8accf7) + // $ cri-dockerd --help | grep -i cni + // --cni-bin-dir string A comma-separated list of full paths of directories in which to search for CNI plugin binaries. (default "/opt/cni/bin") + // --cni-cache-dir string The full path of the directory in which CNI should store cache files. (default "/var/lib/cni/cache") + // --cni-conf-dir string The full path of the directory in which to search for CNI config files (default "/etc/cni/net.d") + // --network-plugin string The name of the network plugin to be invoked for various events in kubelet/pod lifecycle. 
(default "cni") + args := " --hairpin-mode=hairpin-veth" + // if network plugin is not selected - use default "cni" if networkPlugin == "" { - // no-op plugin - return nil + networkPlugin = "cni" } - - args := "" - if networkPlugin == "cni" { - args += " --cni-bin-dir=" + CNIBinDir - args += " --cni-cache-dir=" + CNICacheDir - args += " --cni-conf-dir=" + cni.ConfDir - args += " --hairpin-mode=promiscuous-bridge" - } - opts := struct { NetworkPlugin string ExtraArguments string @@ -712,5 +738,5 @@ ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plug if err := cr.Copy(svc); err != nil { return errors.Wrap(err, "failed to copy template") } - return r.Init.Restart("cri-docker") + return nil } diff --git a/pkg/minikube/detect/detect.go b/pkg/minikube/detect/detect.go index 74b5dc5bd7..5bb51753e3 100644 --- a/pkg/minikube/detect/detect.go +++ b/pkg/minikube/detect/detect.go @@ -29,6 +29,7 @@ import ( "github.com/spf13/viper" "golang.org/x/sys/cpu" "k8s.io/klog/v2" + "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/localpath" ) @@ -147,3 +148,20 @@ func SocketVMNetInstalled() bool { } return false } + +// CgroupDriver returns detected cgroup driver as configured on host os. +// If unable to detect, it will return constants.DefaultCgroupDriver instead. +// ref: https://kubernetes.io/docs/setup/production-environment/container-runtimes/#cgroup-drivers +func CgroupDriver() string { + switch cgroupVersion() { + case "v1": + klog.Infof("detected %q cgroup driver on host os", constants.CgroupfsCgroupDriver) + return constants.CgroupfsCgroupDriver + case "v2": + klog.Infof("detected %q cgroup driver on host os", constants.SystemdCgroupDriver) + return constants.SystemdCgroupDriver + default: + klog.Warningf("unable to detect host's os cgroup driver - will continue and try with %q as per default, but things might break", constants.DefaultCgroupDriver) + return constants.DefaultCgroupDriver // try with default rather than just give up + } +} diff --git a/pkg/minikube/detect/detect_linux.go b/pkg/minikube/detect/detect_linux.go new file mode 100644 index 0000000000..4774ee688c --- /dev/null +++ b/pkg/minikube/detect/detect_linux.go @@ -0,0 +1,56 @@ +//go:build linux + +/* +Copyright 2022 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package detect + +import ( + "runtime" + + "golang.org/x/sys/unix" +) + +// cgroupVersion returns cgroup version as set on the linux OS host machine (where minikube runs). +// Possible options are: "v1", "v2" or "" (unknown). 
+// ref: https://kubernetes.io/docs/concepts/architecture/cgroups/#check-cgroup-version +// ref: https://man7.org/linux/man-pages/man7/cgroups.7.html +func cgroupVersion() string { + if runtime.GOOS != "linux" { + return "" + } + + // check '/sys/fs/cgroup' or '/sys/fs/cgroup/unified' type + var stat unix.Statfs_t + if err := unix.Statfs("/sys/fs/cgroup", &stat); err != nil { + return "" + } + // fallback, but could be misleading + if stat.Type != unix.TMPFS_MAGIC && stat.Type != unix.CGROUP_SUPER_MAGIC && stat.Type != unix.CGROUP2_SUPER_MAGIC { + if err := unix.Statfs("/sys/fs/cgroup/unified", &stat); err != nil { + return "" + } + } + + switch stat.Type { + case unix.TMPFS_MAGIC, unix.CGROUP_SUPER_MAGIC: // tmpfs, cgroupfs + return "v1" + case unix.CGROUP2_SUPER_MAGIC: // cgroup2fs + return "v2" + default: + return "" + } +} diff --git a/pkg/minikube/detect/detect_nonlinux.go b/pkg/minikube/detect/detect_nonlinux.go new file mode 100644 index 0000000000..ab2f0eb704 --- /dev/null +++ b/pkg/minikube/detect/detect_nonlinux.go @@ -0,0 +1,24 @@ +//go:build !linux + +/* +Copyright 2022 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package detect + +// cgroupVersion returns cgroups v1 for non-linux OS host machine (where minikube runs). 
+func cgroupVersion() string { + return "v1" +} diff --git a/pkg/minikube/download/download.go b/pkg/minikube/download/download.go index 83c2c3c862..c726905383 100644 --- a/pkg/minikube/download/download.go +++ b/pkg/minikube/download/download.go @@ -25,7 +25,7 @@ import ( "time" "github.com/hashicorp/go-getter" - "github.com/juju/mutex" + "github.com/juju/mutex/v2" "github.com/pkg/errors" "k8s.io/klog/v2" "k8s.io/minikube/pkg/minikube/detect" @@ -125,6 +125,7 @@ func lockDownload(file string) (mutex.Releaser, error) { go func() { spec := lock.PathMutexSpec(file) + spec.Timeout = 5 * time.Minute releaser, err := mutex.Acquire(spec) if err != nil { lockChannel <- retPair{nil, errors.Wrapf(err, "failed to acquire lock \"%s\": %+v", file, spec)} diff --git a/pkg/minikube/download/iso.go b/pkg/minikube/download/iso.go index fe69ffdf13..626fb206ce 100644 --- a/pkg/minikube/download/iso.go +++ b/pkg/minikube/download/iso.go @@ -26,7 +26,7 @@ import ( "strings" "time" - "github.com/juju/mutex" + "github.com/juju/mutex/v2" "github.com/pkg/errors" "k8s.io/klog/v2" "k8s.io/minikube/pkg/minikube/detect" diff --git a/pkg/minikube/driver/auxdriver/install.go b/pkg/minikube/driver/auxdriver/install.go index 76d1192155..854e9a48b5 100644 --- a/pkg/minikube/driver/auxdriver/install.go +++ b/pkg/minikube/driver/auxdriver/install.go @@ -26,7 +26,7 @@ import ( "time" "github.com/blang/semver/v4" - "github.com/juju/mutex" + "github.com/juju/mutex/v2" "github.com/pkg/errors" "k8s.io/klog/v2" diff --git a/pkg/minikube/image/cache.go b/pkg/minikube/image/cache.go index 13ed49a0f9..aa1b9e8be8 100644 --- a/pkg/minikube/image/cache.go +++ b/pkg/minikube/image/cache.go @@ -24,7 +24,7 @@ import ( "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/tarball" - "github.com/juju/mutex" + "github.com/juju/mutex/v2" "github.com/pkg/errors" "golang.org/x/sync/errgroup" "k8s.io/klog/v2" diff --git a/pkg/minikube/kubeconfig/settings.go b/pkg/minikube/kubeconfig/settings.go index da4077e028..f6b4c00ed4 100644 --- a/pkg/minikube/kubeconfig/settings.go +++ b/pkg/minikube/kubeconfig/settings.go @@ -21,7 +21,7 @@ import ( "path/filepath" "sync/atomic" - "github.com/juju/mutex" + "github.com/juju/mutex/v2" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/clientcmd/api" diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go index 80210f8426..1afad1f1ce 100644 --- a/pkg/minikube/machine/start.go +++ b/pkg/minikube/machine/start.go @@ -33,7 +33,7 @@ import ( "github.com/docker/machine/libmachine/drivers" "github.com/docker/machine/libmachine/engine" "github.com/docker/machine/libmachine/host" - "github.com/juju/mutex" + "github.com/juju/mutex/v2" "github.com/pkg/errors" "github.com/spf13/viper" "k8s.io/klog/v2" diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index a40c58de59..c98ba4cd72 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -50,6 +50,7 @@ import ( "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" + "k8s.io/minikube/pkg/minikube/detect" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/kubeconfig" @@ -209,6 +210,14 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { return nil, errors.Wrap(err, "cni apply") } } + + if !starter.Cfg.DisableOptimizations { + // Scale down CoreDNS from 
default 2 to 1 replica. + if err := kapi.ScaleDeployment(starter.Cfg.Name, meta.NamespaceSystem, kconst.CoreDNSDeploymentName, 1); err != nil { + klog.Errorf("Unable to scale down deployment %q in namespace %q to 1 replica: %v", kconst.CoreDNSDeploymentName, meta.NamespaceSystem, err) + } + } + + klog.Infof("Will wait %s for node %+v", viper.GetDuration(waitTimeout), starter.Node) if err := bs.WaitForNode(*starter.Cfg, *starter.Node, viper.GetDuration(waitTimeout)); err != nil { return nil, errors.Wrapf(err, "wait %s for node", viper.GetDuration(waitTimeout)) } @@ -262,13 +271,6 @@ func handleAPIServer(starter Starter, cr cruntime.Manager, hostIP net.IP) (*kube return nil, bs, errors.Wrap(err, "Failed kubeconfig update") } - if !starter.Cfg.DisableOptimizations { - // Scale down CoreDNS from default 2 to 1 replica. - if err := kapi.ScaleDeployment(starter.Cfg.Name, meta.NamespaceSystem, kconst.CoreDNSDeploymentName, 1); err != nil { - klog.Errorf("Unable to scale down deployment %q in namespace %q to 1 replica: %v", kconst.CoreDNSDeploymentName, meta.NamespaceSystem, err) - } - } - // Not running this in a Go func can result in DNS answering taking up to 38 seconds, with the Go func it takes 6-10 seconds. go func() { // Inject {"host.minikube.internal": hostIP} record into CoreDNS. @@ -379,9 +381,27 @@ func configureRuntimes(runner cruntime.CommandRunner, cc config.ClusterConfig, k exit.Error(reason.InternalRuntime, "Failed runtime", err) } - disableOthers := true - if driver.BareMetal(cc.Driver) { - disableOthers = false + // 87-podman.conflist cni conf potentially conflicts with others and is created by podman on its first invocation, + // so we "provoke" it here to ensure it's generated and that we can disable it + // note: using 'help' or '--help' would be cheaper, but does not trigger that; 'version' seems to be next best option + if co.Type == constants.CRIO { + _, _ = runner.RunCmd(exec.Command("sudo", "sh", "-c", `podman version >/dev/null`)) + } + // ensure loopback is properly configured + // make sure container runtime is restarted afterwards for these changes to take effect + disableLoopback := co.Type == constants.CRIO + if err := cni.ConfigureLoopbackCNI(runner, disableLoopback); err != nil { + klog.Warningf("unable to name loopback interface in configureRuntimes: %v", err) + } + if kv.GTE(semver.MustParse("1.24.0-alpha.2")) { + if err := cruntime.ConfigureNetworkPlugin(cr, runner, cc.KubernetesConfig.NetworkPlugin); err != nil { + exit.Error(reason.RuntimeEnable, "Failed to configure network plugin", err) + } + } + // ensure all default CNI(s) are properly configured on each and every node (re)start + // make sure container runtime is restarted afterwards for these changes to take effect + if err := cni.ConfigureDefaultBridgeCNIs(runner, cc.KubernetesConfig.NetworkPlugin); err != nil { + klog.Errorf("unable to disable preinstalled bridge CNI(s): %v", err) } // Preload is overly invasive for bare metal, and caching is not meaningful.
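
The two cni helpers invoked above (ConfigureLoopbackCNI, ConfigureDefaultBridgeCNIs) exist to neutralize preinstalled or autogenerated CNI configs, such as podman's 87-podman.conflist provoked into existence just before, so they cannot shadow the cluster's chosen CNI. Below is a minimal standalone sketch of that disable-by-renaming idea; the ".mk_disabled" suffix and the substring matching are illustrative assumptions, not minikube's actual implementation:

```go
// Sketch: disable potentially conflicting CNI configs by renaming them,
// so the container runtime stops loading them but the change stays reversible.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func disableConflictingCNIs(confDir string) error {
	entries, err := os.ReadDir(confDir)
	if err != nil {
		return err
	}
	for _, e := range entries {
		name := e.Name()
		// illustrative match for default bridge and podman-generated configs
		if strings.Contains(name, "bridge") || strings.Contains(name, "podman") {
			old := filepath.Join(confDir, name)
			// ".mk_disabled" is an assumed marker suffix, used here for illustration
			if err := os.Rename(old, old+".mk_disabled"); err != nil {
				return fmt.Errorf("disabling %s: %w", old, err)
			}
			fmt.Println("disabled CNI config", old)
		}
	}
	return nil
}

func main() {
	if err := disableConflictingCNIs("/etc/cni/net.d"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```

Renaming rather than deleting keeps the operation reversible, which matters on user-owned hosts (none/ssh drivers) where minikube is a guest.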
@@ -401,34 +421,82 @@ func configureRuntimes(runner cruntime.CommandRunner, cc config.ClusterConfig, k } } - if kv.GTE(semver.MustParse("1.24.0-alpha.2")) { - if err := cruntime.ConfigureNetworkPlugin(cr, runner, cc.KubernetesConfig.NetworkPlugin); err != nil { - exit.Error(reason.RuntimeEnable, "Failed to configure network plugin", err) + inUserNamespace := strings.Contains(cc.KubernetesConfig.FeatureGates, "KubeletInUserNamespace=true") + // for docker container runtime: ensure containerd is properly configured by calling Enable(), as docker could be bound to containerd + // it will also "soft" start containerd, but it will not disable others; docker will disable containerd if not used in the next step + if co.Type == constants.Docker { + containerd, err := cruntime.New(cruntime.Config{ + Type: constants.Containerd, + Socket: "", // use default + Runner: co.Runner, + ImageRepository: co.ImageRepository, + KubernetesVersion: co.KubernetesVersion, + InsecureRegistry: co.InsecureRegistry}) + if err == nil { + err = containerd.Enable(false, cgroupDriver(cc), inUserNamespace) // do not disableOthers, as it's not primary cr + } + if err != nil { + klog.Warningf("cannot ensure containerd is configured properly and reloaded for docker - cluster might be unstable: %v", err) } } - inUserNamespace := strings.Contains(cc.KubernetesConfig.FeatureGates, "KubeletInUserNamespace=true") - err = cr.Enable(disableOthers, forceSystemd(), inUserNamespace) - if err != nil { + disableOthers := !driver.BareMetal(cc.Driver) + if err = cr.Enable(disableOthers, cgroupDriver(cc), inUserNamespace); err != nil { exit.Error(reason.RuntimeEnable, "Failed to enable container runtime", err) } // Wait for the CRI to be "live", before returning it - err = waitForCRISocket(runner, cr.SocketPath(), 60, 1) - if err != nil { + if err = waitForCRISocket(runner, cr.SocketPath(), 60, 1); err != nil { exit.Error(reason.RuntimeEnable, "Failed to start container runtime", err) } // Wait for the CRI to actually work, before returning - err = waitForCRIVersion(runner, cr.SocketPath(), 60, 10) - if err != nil { + if err = waitForCRIVersion(runner, cr.SocketPath(), 60, 10); err != nil { exit.Error(reason.RuntimeEnable, "Failed to start container runtime", err) } + return cr } -func forceSystemd() bool { - return viper.GetBool("force-systemd") || os.Getenv(constants.MinikubeForceSystemdEnv) == "true" +// cgroupDriver returns cgroup driver that should be used to further configure container runtime, node(s) and cluster. +// It is based on: +// - (forced) user preference (set via flags or env), if present, or +// - default settings for vm or ssh driver, if user, or +// - host os config detection, if possible. +// Possible mappings are: "v1" (legacy) cgroups => "cgroupfs", "v2" (unified) cgroups => "systemd" and "" (unknown) cgroups => constants.DefaultCgroupDriver. 
+// Note: starting from k8s v1.22, "kubeadm clusters should be using the systemd driver": +// ref: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.22.md#no-really-you-must-read-this-before-you-upgrade +// ref: https://kubernetes.io/docs/setup/production-environment/container-runtimes/#cgroup-drivers +// ref: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/ +func cgroupDriver(cc config.ClusterConfig) string { + klog.Info("detecting cgroup driver to use...") + + // check flags for user preference + if viper.GetBool("force-systemd") { + klog.Infof("using %q cgroup driver as enforced via flags", constants.SystemdCgroupDriver) + return constants.SystemdCgroupDriver + } + + // check env for user preference + env := os.Getenv(constants.MinikubeForceSystemdEnv) + if force, err := strconv.ParseBool(env); env != "" && err == nil && force { + klog.Infof("using %q cgroup driver as enforced via env", constants.SystemdCgroupDriver) + return constants.SystemdCgroupDriver + } + + // vm driver uses iso that boots with cgroupfs cgroup driver by default atm (keep in sync!) + if driver.IsVM(cc.Driver) { + return constants.CgroupfsCgroupDriver + } + + // for "remote baremetal", we assume cgroupfs and user can "force-systemd" with flag to override + // potential improvement: use systemd as default (in line with k8s) and allow user to override it with new flag (eg, "cgroup-driver", that would replace "force-systemd") + if driver.IsSSH(cc.Driver) { + return constants.CgroupfsCgroupDriver + } + + // in all other cases - try to detect and use what's on user's machine + return detect.CgroupDriver() } func pathExists(runner cruntime.CommandRunner, path string) (bool, error) { @@ -815,13 +883,20 @@ func addCoreDNSEntry(runner command.Runner, name, ip string, cc config.ClusterCo } // inject hosts block with host record into coredns configmap - sed := fmt.Sprintf("sed '/^ forward . \\/etc\\/resolv.conf.*/i \\ hosts {\\n %s %s\\n fallthrough\\n }'", ip, name) + sed := fmt.Sprintf("sed -e '/^ forward . 
\\/etc\\/resolv.conf.*/i \\ hosts {\\n %s %s\\n fallthrough\\n }'", ip, name) // check if hosts block already exists in coredns configmap hosts := regexp.MustCompile(`(?smU)^ *hosts {.*}`) if hosts.MatchString(cm) { // inject host record into existing coredns configmap hosts block instead klog.Info("CoreDNS already contains hosts block, will inject host record there...") - sed = fmt.Sprintf("sed '/^ hosts {.*/a \\ %s %s'", ip, name) + sed = fmt.Sprintf("sed -e '/^ hosts {.*/a \\ %s %s'", ip, name) + } + + // check if logging is already enabled (via log plugin) in coredns configmap, so not to duplicate it + logs := regexp.MustCompile(`(?smU)^ *log *$`) + if !logs.MatchString(cm) { + // inject log plugin into coredns configmap + sed = fmt.Sprintf("%s -e '/^ errors *$/i \\ log'", sed) } // replace coredns configmap via kubectl @@ -830,7 +905,7 @@ func addCoreDNSEntry(runner command.Runner, name, ip string, cc config.ClusterCo klog.Errorf("failed to inject {%q: %s} host record into CoreDNS", name, ip) return err } - klog.Infof("{%q: %s} host record injected into CoreDNS", name, ip) + klog.Infof("{%q: %s} host record injected into CoreDNS's ConfigMap", name, ip) return nil } diff --git a/pkg/network/network.go b/pkg/network/network.go index f0a35ed1ab..8a84260b2c 100644 --- a/pkg/network/network.go +++ b/pkg/network/network.go @@ -20,21 +20,15 @@ import ( "encoding/binary" "fmt" "net" - "sync" "time" + "github.com/juju/mutex/v2" "k8s.io/klog/v2" + "k8s.io/minikube/pkg/util/lock" ) const defaultReservationPeriod = 1 * time.Minute -var reservedSubnets = sync.Map{} - -// reservation of free private subnet is held for defined reservation period from createdAt time. -type reservation struct { - createdAt time.Time -} - // Parameters contains main network parameters. type Parameters struct { IP string // IP address of network @@ -47,6 +41,7 @@ type Parameters struct { Broadcast string // last IP address IsPrivate bool // whether the IP is private or not Interface + reservation mutex.Releaser // subnet reservation has lifespan of the process: "If a process dies while the mutex is held, the mutex is automatically released." } // Interface contains main network interface parameters. 
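
The two Corefile probes used in addCoreDNSEntry above can be exercised in isolation. Here is a small runnable check of those exact regexes; the sample Corefile is illustrative, not captured from a live cluster:

```go
// Demonstrates the regexes addCoreDNSEntry uses to decide whether the CoreDNS
// Corefile already has a hosts block and whether the log plugin is enabled.
package main

import (
	"fmt"
	"regexp"
)

const corefile = `.:53 {
    errors
    hosts {
       192.168.49.1 host.minikube.internal
       fallthrough
    }
    forward . /etc/resolv.conf
}`

func main() {
	hosts := regexp.MustCompile(`(?smU)^ *hosts {.*}`)
	logs := regexp.MustCompile(`(?smU)^ *log *$`)
	fmt.Println("hosts block present:", hosts.MatchString(corefile)) // true: append into the existing block
	fmt.Println("log plugin present:", logs.MatchString(corefile))   // false: the sed above injects one before "errors"
}
```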
@@ -206,7 +201,8 @@ func FreeSubnet(startSubnet string, step, tries int) (*Parameters, error) { return nil, err } if !taken { - if ok := reserveSubnet(subnet, defaultReservationPeriod); ok { + if reservation, err := reserveSubnet(subnet, defaultReservationPeriod); err == nil { + n.reservation = reservation klog.Infof("using free private subnet %s: %+v", n.CIDR, n) return n, nil } @@ -242,39 +238,13 @@ func ParseAddr(addr string) (net.IP, *net.IPNet, error) { return ip, network, err } -// reserveSubnet returns if subnet was successfully reserved for given period: -// - false, if it already has unexpired reservation -// - true, if new reservation was created or expired one renewed -// -// uses sync.Map to manage reservations thread-safe -var reserveSubnet = func(subnet string, period time.Duration) bool { - // put 'zero' reservation{} Map value for subnet Map key - // to block other processes from concurrently changing this subnet - zero := reservation{} - r, loaded := reservedSubnets.LoadOrStore(subnet, zero) - // check if there was previously issued reservation - if loaded { - // back off if previous reservation was already set to 'zero' - // as then other process is already managing this subnet concurrently - if r == zero { - klog.Infof("backing off reserving subnet %s (other process is managing it!): %+v", subnet, &reservedSubnets) - return false - } - // check if previous reservation expired - createdAt := r.(reservation).createdAt - if time.Since(createdAt) < period { - // unexpired reservation: restore original createdAt value - reservedSubnets.Store(subnet, reservation{createdAt: createdAt}) - klog.Infof("skipping subnet %s that has unexpired reservation: %+v", subnet, &reservedSubnets) - return false - } - // expired reservation: renew setting createdAt to now - reservedSubnets.Store(subnet, reservation{createdAt: time.Now()}) - klog.Infof("reusing subnet %s that has expired reservation: %+v", subnet, &reservedSubnets) - return true +// reserveSubnet returns releaser if subnet was successfully reserved for given period, creating lock for subnet to avoid race condition between multiple minikube instances (especially while testing in parallel). 
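
The replacement implementation, which follows directly below, swaps the in-process sync.Map bookkeeping for an OS-level named mutex, so the reservation also holds across separate minikube processes and is dropped automatically when the holding process dies. As a standalone sketch of that pattern against github.com/juju/mutex/v2 (minikube goes through lock.PathMutexSpec, which additionally hashes arbitrary strings such as subnets into valid mutex names); the mutex name and durations here are illustrative:

```go
// Sketch: try to take a cross-process reservation without waiting for it.
package main

import (
	"fmt"
	"time"

	"github.com/juju/clock"
	"github.com/juju/mutex/v2"
)

func main() {
	spec := mutex.Spec{
		Name:    "minikube-subnet-192-168-49-0", // must start with a letter and stay short
		Clock:   clock.WallClock,
		Delay:   100 * time.Millisecond, // polling interval while waiting
		Timeout: time.Millisecond,       // practically: just check, don't wait
	}
	releaser, err := mutex.Acquire(spec)
	if err != nil {
		// a timeout here means another process currently holds the reservation
		fmt.Println("subnet already reserved:", err)
		return
	}
	defer releaser.Release() // held until released, or until this process exits
	fmt.Println("subnet reserved by this process")
}
```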
+var reserveSubnet = func(subnet string, period time.Duration) (mutex.Releaser, error) { + spec := lock.PathMutexSpec(subnet) + spec.Timeout = 1 * time.Millisecond // practically: just check, don't wait + reservation, err := mutex.Acquire(spec) + if err != nil { + return nil, err } - // new reservation - klog.Infof("reserving subnet %s for %v: %+v", subnet, period, &reservedSubnets) - reservedSubnets.Store(subnet, reservation{createdAt: time.Now()}) - return true + return reservation, nil } diff --git a/pkg/network/network_test.go b/pkg/network/network_test.go index 5390bf5f3d..ad1eddead4 100644 --- a/pkg/network/network_test.go +++ b/pkg/network/network_test.go @@ -20,10 +20,12 @@ import ( "strings" "testing" "time" + + "github.com/juju/mutex/v2" ) func TestFreeSubnet(t *testing.T) { - reserveSubnet = func(subnet string, period time.Duration) bool { return true } + reserveSubnet = func(subnet string, period time.Duration) (mutex.Releaser, error) { return nil, nil } t.Run("NoRetriesSuccess", func(t *testing.T) { startingSubnet := "192.168.0.0" diff --git a/pkg/util/lock/lock.go b/pkg/util/lock/lock.go index 84158a2289..efb8dc01b4 100644 --- a/pkg/util/lock/lock.go +++ b/pkg/util/lock/lock.go @@ -23,7 +23,7 @@ import ( "time" "github.com/juju/clock" - "github.com/juju/mutex" + "github.com/juju/mutex/v2" "github.com/pkg/errors" "k8s.io/klog/v2" diff --git a/pkg/util/lock/lock_test.go b/pkg/util/lock/lock_test.go index 8ec94e5f24..e41976b5c2 100644 --- a/pkg/util/lock/lock_test.go +++ b/pkg/util/lock/lock_test.go @@ -19,7 +19,7 @@ package lock import ( "testing" - "github.com/juju/mutex" + "github.com/juju/mutex/v2" ) func TestUserMutexSpec(t *testing.T) { diff --git a/site/content/en/docs/faq/_index.md b/site/content/en/docs/faq/_index.md index 6544cd8e5d..c9510dde2c 100644 --- a/site/content/en/docs/faq/_index.md +++ b/site/content/en/docs/faq/_index.md @@ -55,8 +55,9 @@ minikube addons enable auto-pause ## Docker Driver: How can I set minikube's cgroup manager? -By default minikube uses the `cgroupfs` cgroup manager for Kubernetes clusters. If you are on a system with a systemd cgroup manager, this could cause conflicts. -To use the `systemd` cgroup manager, run: +For non-VM and non-SSH drivers, minikube will try to auto-detect your system's cgroups driver/manager and configure all other components accordingly. +For VM and SSH drivers, minikube will use cgroupfs cgroups driver/manager by default. +To force the `systemd` cgroup manager, run: ```bash minikube start --force-systemd=true diff --git a/test/integration/addons_test.go b/test/integration/addons_test.go index 88e1434a6d..f40a4f93cb 100644 --- a/test/integration/addons_test.go +++ b/test/integration/addons_test.go @@ -38,6 +38,7 @@ import ( "github.com/blang/semver/v4" retryablehttp "github.com/hashicorp/go-retryablehttp" "k8s.io/minikube/pkg/kapi" + "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/detect" "k8s.io/minikube/pkg/util/retry" ) @@ -68,7 +69,14 @@ func TestAddons(t *testing.T) { } // MOCK_GOOGLE_TOKEN forces the gcp-auth webhook to use a mock token instead of trying to get a valid one from the credentials. 
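
Tying the FAQ change above back to the code: the new cgroupDriver helper in pkg/minikube/node/start.go resolves the driver in a fixed order (force-systemd flag, then the MINIKUBE_FORCE_SYSTEMD env var, then the cgroupfs default for VM/SSH drivers, then host detection). A compact runnable restatement of that precedence follows; isVM, isSSH and detectHost are stand-ins for minikube's driver and detect packages, not real APIs:

```go
// Sketch of the cgroup driver precedence introduced in this change.
package main

import (
	"fmt"
	"os"
	"strconv"
)

const (
	cgroupfs = "cgroupfs"
	systemd  = "systemd"
)

func chooseCgroupDriver(forceSystemdFlag, isVM, isSSH bool, detectHost func() string) string {
	if forceSystemdFlag {
		return systemd // explicit flag wins
	}
	// env override counts only when set to a parseable "true"
	if env := os.Getenv("MINIKUBE_FORCE_SYSTEMD"); env != "" {
		if force, err := strconv.ParseBool(env); err == nil && force {
			return systemd
		}
	}
	if isVM || isSSH {
		return cgroupfs // iso/remote defaults, overridable via the flag above
	}
	return detectHost() // otherwise mirror the host's cgroup driver
}

func main() {
	fmt.Println(chooseCgroupDriver(false, false, false, func() string { return systemd }))
}
```

This also explains the TestAddons change that follows: clearing MINIKUBE_FORCE_SYSTEMD makes the run fall through to host detection instead of forcing systemd onto a cgroupfs host.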
- t.Setenv("MOCK_GOOGLE_TOKEN", "true") + os.Setenv("MOCK_GOOGLE_TOKEN", "true") + + // for some reason, (Docker_Cloud_Shell) sets 'MINIKUBE_FORCE_SYSTEMD=true' while having cgroupfs set in docker (and probably os itself), which might make it unstable and occasionally fail: + // - I1226 15:05:24.834294 11286 out.go:177] - MINIKUBE_FORCE_SYSTEMD=true + // - I1226 15:05:25.070037 11286 info.go:266] docker info: {... CgroupDriver:cgroupfs ...} + // ref: https://storage.googleapis.com/minikube-builds/logs/15463/27154/Docker_Cloud_Shell.html + // so we override that here to let minikube auto-detect appropriate cgroup driver + os.Setenv(constants.MinikubeForceSystemdEnv, "") args := append([]string{"start", "-p", profile, "--wait=true", "--memory=4000", "--alsologtostderr", "--addons=registry", "--addons=metrics-server", "--addons=volumesnapshots", "--addons=csi-hostpath-driver", "--addons=gcp-auth", "--addons=cloud-spanner"}, StartArgs()...) if !NoneDriver() { // none driver does not support ingress @@ -733,7 +741,14 @@ func validateGCPAuthAddon(ctx context.Context, t *testing.T, profile string) { } // If we're on GCE, we have proper credentials and can test the registry secrets with an artifact registry image - if detect.IsOnGCE() && !detect.IsCloudShell() { + if detect.IsOnGCE() && !detect.IsCloudShell() && !VMDriver() { + t.Skip("skipping GCPAuth addon test until 'Permission \"artifactregistry.repositories.downloadArtifacts\" denied on resource \"projects/k8s-minikube/locations/us/repositories/test-artifacts\" (or it may not exist)' issue is resolved") + // "Setting the environment variable MOCK_GOOGLE_TOKEN to true will prevent using the google application credentials to fetch the token used for the image pull secret. Instead the token will be mocked." + // ref: https://github.com/GoogleContainerTools/gcp-auth-webhook#gcp-auth-webhook + os.Unsetenv("MOCK_GOOGLE_TOKEN") + // re-set MOCK_GOOGLE_TOKEN once we're done + defer os.Setenv("MOCK_GOOGLE_TOKEN", "true") + os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS") os.Unsetenv("GOOGLE_CLOUD_PROJECT") args := []string{"-p", profile, "addons", "enable", "gcp-auth"} diff --git a/test/integration/helpers_test.go b/test/integration/helpers_test.go index f3f2999419..f01124961f 100644 --- a/test/integration/helpers_test.go +++ b/test/integration/helpers_test.go @@ -263,10 +263,12 @@ func PostMortemLogs(t *testing.T, profile string, multinode ...bool) { t.Logf("%s: %v", rr.Command(), rerr) return } - notRunning := strings.Split(rr.Stdout.String(), " ") - if len(notRunning) == 0 { + // strings.Split("", " ") results in [""] slice of len 1 ! 
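
The helpers_test.go fix that follows hinges on a well-known Go gotcha, flagged by the comment above: strings.Split applied to an empty string returns a one-element slice containing "", never an empty slice, so a len check on the split result cannot detect "no output". Demonstrated standalone:

```go
// Demonstrates why the old `len(notRunning) == 0` check never fired.
package main

import (
	"fmt"
	"strings"
)

func main() {
	fmt.Println(len(strings.Split("", " "))) // 1, not 0: the slice is [""]

	// the fix: trim first, test the string, then split
	out := strings.TrimSpace("")
	if len(out) == 0 {
		fmt.Println("no non-running pods") // the fixed code takes this path
	}
}
```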
+ out := strings.TrimSpace(rr.Stdout.String()) + if len(out) == 0 { continue } + notRunning := strings.Split(out, " ") t.Logf("non-running pods: %s", strings.Join(notRunning, " ")) t.Logf("======> post-mortem[%s]: describe non-running pods <======", t.Name()) diff --git a/test/integration/json_output_test.go b/test/integration/json_output_test.go index 0f4e53debb..03e7d1691c 100644 --- a/test/integration/json_output_test.go +++ b/test/integration/json_output_test.go @@ -120,7 +120,7 @@ func validateDistinctCurrentSteps(ctx context.Context, t *testing.T, ces []*clou // validateIncreasingCurrentSteps verifies that for a successful minikube start, 'current step' should be increasing func validateIncreasingCurrentSteps(ctx context.Context, t *testing.T, ces []*cloudEvent) { step := -1 - for _, ce := range ces { + for i, ce := range ces { currentStep, exists := ce.data["currentstep"] if !exists || ce.Type() != "io.k8s.sigs.minikube.step" { continue } @@ -130,6 +130,17 @@ func validateIncreasingCurrentSteps(ctx context.Context, t *testing.T, ces []*cl t.Fatalf("current step is not an integer: %v\n%v", currentStep, ce) } if cs <= step { + // check if steps are mixed because goroutines complete in an unusual order, but still ok + // eg, "Enabling Addons" (goroutine) might complete before or after "Verifying Kubernetes" finishes + if i > 0 { + prev := ces[i-1].data["name"] + cur := ce.data["name"] + if cur == "Verifying Kubernetes" && prev == "Enabling Addons" { + t.Logf("unusual order of steps, might be ok: %q event came before %q", prev, cur) + step = cs + continue + } + } t.Fatalf("current step is not in increasing order: %v", ces) } step = cs diff --git a/test/integration/main_test.go b/test/integration/main_test.go index 3a8e9c7b74..051c302b29 100644 --- a/test/integration/main_test.go +++ b/test/integration/main_test.go @@ -159,6 +159,11 @@ func KicDriver() bool { return DockerDriver() || PodmanDriver() } +// VMDriver checks if the driver is a VM +func VMDriver() bool { + return !KicDriver() && !NoneDriver() +} + // ContainerRuntime returns the name of a specific container runtime if it was specified func ContainerRuntime() string { flag := "--container-runtime=" diff --git a/test/integration/net_test.go b/test/integration/net_test.go index 4556172302..dae8e1d781 100644 --- a/test/integration/net_test.go +++ b/test/integration/net_test.go @@ -40,6 +40,9 @@ import ( // Options tested: kubenet, bridge, flannel, kindnet, calico, cilium // Flags tested: enable-default-cni (legacy), false (CNI off), auto-detection func TestNetworkPlugins(t *testing.T) { + // generate reasonably unique profile name suffix to be used for all tests + suffix := UniqueProfileName("") + MaybeParallel(t) if NoneDriver() { t.Skip("skipping since test for none driver") @@ -51,33 +54,31 @@ func TestNetworkPlugins(t *testing.T) { args []string kubeletPlugin string podLabel string + namespace string hairpin bool }{ - // for containerd and crio runtimes kindnet CNI is used by default and hairpin is enabled - {"auto", []string{}, "", "", ContainerRuntime() != "docker"}, - {"kubenet", []string{"--network-plugin=kubenet"}, "kubenet", "", true}, - {"bridge", []string{"--cni=bridge"}, "cni", "", true}, - {"enable-default-cni", []string{"--enable-default-cni=true"}, "cni", "", true}, - {"flannel", []string{"--cni=flannel"}, "cni", "app=flannel", true}, - {"kindnet", []string{"--cni=kindnet"}, "cni", "app=kindnet", true}, - {"false", []string{"--cni=false"}, "", "", false}, - {"custom-flannel", []string{fmt.Sprintf("--cni=%s",
filepath.Join(*testdataDir, "kube-flannel.yaml"))}, "cni", "", true}, - {"calico", []string{"--cni=calico"}, "cni", "k8s-app=calico-node", true}, - {"cilium", []string{"--cni=cilium"}, "cni", "k8s-app=cilium", true}, + {"auto", []string{}, "", "", "", true}, + {"kubenet", []string{"--network-plugin=kubenet"}, "kubenet", "", "", true}, + {"bridge", []string{"--cni=bridge"}, "cni", "", "", true}, + {"enable-default-cni", []string{"--enable-default-cni=true"}, "cni", "", "", true}, + {"flannel", []string{"--cni=flannel"}, "cni", "app=flannel", "kube-flannel", true}, + {"kindnet", []string{"--cni=kindnet"}, "cni", "app=kindnet", "kube-system", true}, + {"false", []string{"--cni=false"}, "", "", "", true}, + {"custom-flannel", []string{fmt.Sprintf("--cni=%s", filepath.Join(*testdataDir, "kube-flannel.yaml"))}, "cni", "", "kube-flannel", true}, + {"calico", []string{"--cni=calico"}, "cni", "k8s-app=calico-node", "kube-system", true}, + {"cilium", []string{"--cni=cilium"}, "cni", "k8s-app=cilium", "kube-system", true}, } for _, tc := range tests { tc := tc t.Run(tc.name, func(t *testing.T) { - profile := UniqueProfileName(tc.name) + profile := tc.name + suffix - ctx, cancel := context.WithTimeout(context.Background(), Minutes(40)) + ctx, cancel := context.WithTimeout(context.Background(), Minutes(90)) defer CleanupWithLogs(t, profile, cancel) - - if DockerDriver() && strings.Contains(tc.name, "flannel") { - t.Skipf("flannel is not yet compatible with Docker driver: iptables v1.8.3 (legacy): Couldn't load target `CNI-x': No such file or directory") - } + // collect debug logs + defer debugLogs(t, profile) if ContainerRuntime() != "docker" && tc.name == "false" { // CNI is required for current container runtime @@ -91,10 +92,19 @@ func TestNetworkPlugins(t *testing.T) { t.Skipf("Skipping the test as %s container runtimes requires CNI", ContainerRuntime()) } + // (current) cilium is known to mess up the system when interfering with other network tests, so we disable it for now - probably needs updating? + // hint: most probably the problem is in combination of: containerd + (outdated) cgroup_v1(cgroupfs) + (outdated) cilium, on systemd it should work + // unfortunately, cilium changed how cni is deployed and does not provide manifests anymore (since v1.9) so that we can "just update" ours + // ref: https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/ + // ref: https://docs.cilium.io/en/stable/gettingstarted/k8s-install-kubeadm/ + if tc.name == "cilium" { + t.Skip("Skipping the test as it's interfering with other tests and is outdated") + } + start := time.Now() MaybeParallel(t) - startArgs := append([]string{"start", "-p", profile, "--memory=2048", "--alsologtostderr", "--wait=true", "--wait-timeout=5m"}, tc.args...) + startArgs := append([]string{"start", "-p", profile, "--memory=3072", "--alsologtostderr", "--wait=true", "--wait-timeout=15m"}, tc.args...) startArgs = append(startArgs, StartArgs()...) 
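
A side note on the `tc := tc` shadowing kept in the test loop above: before Go 1.22, every closure created in a range loop shares one loop variable, so parallel subtests launched without the copy would all observe the last test case. A standalone demonstration of the pattern:

```go
// Demonstrates per-iteration variable shadowing for concurrent loop bodies
// (the same reason table-driven parallel subtests use `tc := tc`).
package main

import (
	"fmt"
	"sync"
)

func main() {
	names := []string{"auto", "kubenet", "bridge"}
	var wg sync.WaitGroup
	for _, name := range names {
		name := name // each goroutine gets its own copy (required before Go 1.22)
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("running subtest:", name)
		}()
	}
	wg.Wait()
}
```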
t.Run("Start", func(t *testing.T) { @@ -106,7 +116,7 @@ func TestNetworkPlugins(t *testing.T) { if !t.Failed() && tc.podLabel != "" { t.Run("ControllerPod", func(t *testing.T) { - if _, err := PodWait(ctx, t, profile, "kube-system", tc.podLabel, Minutes(10)); err != nil { + if _, err := PodWait(ctx, t, profile, tc.namespace, tc.podLabel, Minutes(10)); err != nil { t.Fatalf("failed waiting for %s labeled pod: %v", tc.podLabel, err) } }) @@ -152,14 +162,9 @@ func TestNetworkPlugins(t *testing.T) { if _, err := PodWait(ctx, t, profile, "default", "app=netcat", Minutes(15)); err != nil { t.Fatalf("failed waiting for netcat pod: %v", err) } - }) } - if strings.Contains(tc.name, "weave") { - t.Skipf("skipping remaining tests for weave, as results can be unpredictable") - } - if !t.Failed() { t.Run("DNS", func(t *testing.T) { var rr *RunResult @@ -213,6 +218,11 @@ func TestNetworkPlugins(t *testing.T) { func validateFalseCNI(ctx context.Context, t *testing.T, profile string) { cr := ContainerRuntime() + // override cri-o name + if cr == "cri-o" { + cr = "crio" + } + startArgs := []string{"start", "-p", profile, "--memory=2048", "--alsologtostderr", "--cni=false"} startArgs = append(startArgs, StartArgs()...) @@ -244,7 +254,7 @@ func validateHairpinMode(ctx context.Context, t *testing.T, profile string, hair } } else { if tryHairPin() == nil { - t.Fatalf("hairpin connection unexpectedly succeeded - misconfigured test?") + t.Errorf("hairpin connection unexpectedly succeeded - misconfigured test?") } } } @@ -268,3 +278,362 @@ func verifyKubeletFlagsOutput(t *testing.T, k8sVersion, kubeletPlugin, out strin t.Errorf("expected --network-plugin=%s, got %s", kubeletPlugin, out) } } + +// debug logs for dns and other network issues +func debugLogs(t *testing.T, profile string) { + t.Helper() + + start := time.Now() + + var output strings.Builder + output.WriteString(fmt.Sprintf("----------------------- debugLogs start: %s [pass: %v] --------------------------------", profile, !t.Failed())) + + // basic nslookup + cmd := exec.Command("kubectl", "--context", profile, "exec", "deployment/netcat", "--", "nslookup", "-timeout=5", "kubernetes.default") + out, err := cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> netcat: nslookup kubernetes.default:\n%s\n", out)) + // skip some checks if no issues or lower-level connectivity issues + if err == nil && !strings.Contains(string(out), "10.96.0.1") || err != nil && !strings.Contains(string(out), ";; connection timed out; no servers could be reached") { // for both nslookup and dig + // nslookup trace search + cmd = exec.Command("kubectl", "--context", profile, "exec", "deployment/netcat", "--", "nslookup", "-timeout=5", "-debug", "-type=a", "kubernetes.default") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> netcat: nslookup debug kubernetes.default a-records:\n%s\n", out)) + + // dig trace search udp + cmd = exec.Command("kubectl", "--context", profile, "exec", "deployment/netcat", "--", "dig", "+timeout=5", "+search", "+showsearch", "kubernetes.default") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> netcat: dig search kubernetes.default:\n%s\n", out)) + // dig trace direct udp + cmd = exec.Command("kubectl", "--context", profile, "exec", "deployment/netcat", "--", "dig", "+timeout=5", "@10.96.0.10", "kubernetes.default.svc.cluster.local") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local udp/53:\n%s\n", out)) + // dig 
trace direct tcp + cmd = exec.Command("kubectl", "--context", profile, "exec", "deployment/netcat", "--", "dig", "+timeout=5", "@10.96.0.10", "+tcp", "kubernetes.default.svc.cluster.local") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local tcp/53:\n%s\n", out)) + } + + // check udp connectivity + cmd = exec.Command("kubectl", "--context", profile, "exec", "deployment/netcat", "--", "nc", "-w", "5", "-z", "-n", "-v", "-u", "10.96.0.10", "53") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> netcat: nc 10.96.0.10 udp/53:\n%s\n", out)) + // check tcp connectivity + cmd = exec.Command("kubectl", "--context", profile, "exec", "deployment/netcat", "--", "nc", "-w", "5", "-z", "-n", "-v", "10.96.0.10", "53") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> netcat: nc 10.96.0.10 tcp/53:\n%s\n", out)) + + // pod's dns env + cmd = exec.Command("kubectl", "--context", profile, "exec", "deployment/netcat", "--", "cat", "/etc/nsswitch.conf") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> netcat: /etc/nsswitch.conf:\n%s\n", out)) + cmd = exec.Command("kubectl", "--context", profile, "exec", "deployment/netcat", "--", "cat", "/etc/hosts") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> netcat: /etc/hosts:\n%s\n", out)) + cmd = exec.Command("kubectl", "--context", profile, "exec", "deployment/netcat", "--", "cat", "/etc/resolv.conf") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> netcat: /etc/resolv.conf:\n%s\n", out)) + + // "host's" dns env + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo cat /etc/nsswitch.conf") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: /etc/nsswitch.conf:\n%s\n", out)) + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo cat /etc/hosts") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: /etc/hosts:\n%s\n", out)) + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo cat /etc/resolv.conf") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: /etc/resolv.conf:\n%s\n", out)) + + // k8s resources overview + cmd = exec.Command("kubectl", "--context", profile, "get", "node,svc,ep,ds,deploy,pods", "-A", "-owide") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: nodes, services, endpoints, daemon sets, deployments and pods:\n%s\n", out)) + + // crictl pods overview + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo crictl pods") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: crictl pods:\n%s\n", out)) + // crictl containers overview + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo crictl ps --all") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: crictl containers:\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "describe", "deployment", "-n", "default", "--selector=app=netcat") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: describe netcat deployment:\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "describe", "pods", "-n", "default", "--selector=app=netcat") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: describe netcat pod(s):\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "logs", "-n", "default", "--selector=app=netcat", "--tail=-1") + out, _ =
cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: netcat logs:\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "describe", "deployment", "-n", "kube-system", "--selector=k8s-app=kube-dns") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: describe coredns deployment:\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "describe", "pods", "-n", "kube-system", "--selector=k8s-app=kube-dns") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: describe coredns pods:\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "logs", "-n", "kube-system", "--selector=k8s-app=kube-dns", "--tail=-1") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: coredns logs:\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "describe", "pods", "-n", "kube-system", "--selector=component=kube-apiserver") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: describe api server pod(s):\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "logs", "-n", "kube-system", "--selector=component=kube-apiserver", "--tail=-1") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: api server logs:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo find /etc/cni -type f -exec sh -c 'echo {}; cat {}' \\;") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: /etc/cni:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo ip a s") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: ip a s:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo ip r s") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: ip r s:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo iptables-save") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: iptables-save:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo iptables -t nat -L -n -v") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: iptables table nat:\n%s\n", out)) + + if strings.Contains(profile, "flannel") { + cmd = exec.Command("kubectl", "--context", profile, "describe", "ds", "-A", "--selector=app=flannel") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: describe flannel daemon set:\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "describe", "pods", "-A", "--selector=app=flannel") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: describe flannel pod(s):\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "logs", "--namespace=kube-flannel", "--selector=app=flannel", "--all-containers", "--prefix", "--ignore-errors") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: flannel container(s) logs (current):\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "logs", "--namespace=kube-flannel", "--selector=app=flannel", "--all-containers", "--prefix", "--ignore-errors", "--previous") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: flannel container(s) logs (previous):\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo cat /run/flannel/subnet.env") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: 
+
+	if strings.Contains(profile, "flannel") {
+		cmd = exec.Command("kubectl", "--context", profile, "describe", "ds", "-A", "--selector=app=flannel")
+		out, _ = cmd.CombinedOutput()
+		output.WriteString(fmt.Sprintf("\n>>> k8s: describe flannel daemon set:\n%s\n", out))
+
+		cmd = exec.Command("kubectl", "--context", profile, "describe", "pods", "-A", "--selector=app=flannel")
+		out, _ = cmd.CombinedOutput()
+		output.WriteString(fmt.Sprintf("\n>>> k8s: describe flannel pod(s):\n%s\n", out))
+
+		cmd = exec.Command("kubectl", "--context", profile, "logs", "--namespace=kube-flannel", "--selector=app=flannel", "--all-containers", "--prefix", "--ignore-errors")
+		out, _ = cmd.CombinedOutput()
+		output.WriteString(fmt.Sprintf("\n>>> k8s: flannel container(s) logs (current):\n%s\n", out))
+
+		cmd = exec.Command("kubectl", "--context", profile, "logs", "--namespace=kube-flannel", "--selector=app=flannel", "--all-containers", "--prefix", "--ignore-errors", "--previous")
+		out, _ = cmd.CombinedOutput()
+		output.WriteString(fmt.Sprintf("\n>>> k8s: flannel container(s) logs (previous):\n%s\n", out))
+
+		cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo cat /run/flannel/subnet.env")
+		out, _ = cmd.CombinedOutput()
+		output.WriteString(fmt.Sprintf("\n>>> host: /run/flannel/subnet.env:\n%s\n", out))
+
+		cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo cat /etc/kube-flannel/cni-conf.json")
+		out, _ = cmd.CombinedOutput()
+		output.WriteString(fmt.Sprintf("\n>>> host: /etc/kube-flannel/cni-conf.json:\n%s\n", out))
+	}
+
+	if strings.Contains(profile, "calico") {
+		cmd = exec.Command("kubectl", "--context", profile, "describe", "ds", "-A", "--selector=k8s-app=calico-node")
+		out, _ = cmd.CombinedOutput()
+		output.WriteString(fmt.Sprintf("\n>>> k8s: describe calico daemon set:\n%s\n", out))
+
+		cmd = exec.Command("kubectl", "--context", profile, "describe", "pods", "-A", "--selector=k8s-app=calico-node")
+		out, _ = cmd.CombinedOutput()
+		output.WriteString(fmt.Sprintf("\n>>> k8s: describe calico daemon set pod(s):\n%s\n", out))
+
+		cmd = exec.Command("kubectl", "--context", profile, "logs", "--namespace=kube-system", "--selector=k8s-app=calico-node", "--all-containers", "--prefix", "--ignore-errors")
+		out, _ = cmd.CombinedOutput()
+		output.WriteString(fmt.Sprintf("\n>>> k8s: calico daemon set container(s) logs (current):\n%s\n", out))
+
+		cmd = exec.Command("kubectl", "--context", profile, "logs", "--namespace=kube-system", "--selector=k8s-app=calico-node", "--all-containers", "--prefix", "--ignore-errors", "--previous")
+		out, _ = cmd.CombinedOutput()
+		output.WriteString(fmt.Sprintf("\n>>> k8s: calico daemon set container(s) logs (previous):\n%s\n", out))
+
+		cmd = exec.Command("kubectl", "--context", profile, "describe", "deploy", "-A", "--selector=k8s-app=calico-kube-controllers")
+		out, _ = cmd.CombinedOutput()
+		output.WriteString(fmt.Sprintf("\n>>> k8s: describe calico deployment:\n%s\n", out))
+
+		cmd = exec.Command("kubectl", "--context", profile, "describe", "pods", "-A", "--selector=k8s-app=calico-kube-controllers")
+		out, _ = cmd.CombinedOutput()
+		output.WriteString(fmt.Sprintf("\n>>> k8s: describe calico deployment pod(s):\n%s\n", out))
+
+		cmd = exec.Command("kubectl", "--context", profile, "logs", "--namespace=kube-system", "--selector=k8s-app=calico-kube-controllers", "--all-containers", "--prefix", "--ignore-errors")
+		out, _ = cmd.CombinedOutput()
+		output.WriteString(fmt.Sprintf("\n>>> k8s: calico deployment container(s) logs (current):\n%s\n", out))
+
+		cmd = exec.Command("kubectl", "--context", profile, "logs", "--namespace=kube-system", "--selector=k8s-app=calico-kube-controllers", "--all-containers", "--prefix", "--ignore-errors", "--previous")
+		out, _ = cmd.CombinedOutput()
+		output.WriteString(fmt.Sprintf("\n>>> k8s: calico deployment container(s) logs (previous):\n%s\n", out))
+	}
"--all-containers", "--prefix", "--ignore-errors", "--previous") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: cilium daemon set container(s) logs (previous):\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "describe", "deploy", "-A", "--selector=name=cilium-operator") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: describe cilium deployment:\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "describe", "pods", "-A", "--selector=name=cilium-operator") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: describe cilium deployment pod(s):\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "logs", "--namespace=kube-system", "--selector=name=cilium-operator", "--all-containers", "--prefix", "--ignore-errors") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: cilium deployment container(s) logs (current):\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "logs", "--namespace=kube-system", "--selector=name=cilium-operator", "--all-containers", "--prefix", "--ignore-errors", "--previous") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: cilium deployment container(s) logs (previous):\n%s\n", out)) + } + + if strings.Contains(profile, "kindnet") { + cmd = exec.Command("kubectl", "--context", profile, "describe", "ds", "-A", "--selector=app=kindnet") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: describe kindnet daemon set:\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "describe", "pods", "-A", "--selector=app=kindnet") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: describe kindnet pod(s):\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "logs", "--namespace=kube-system", "--selector=app=kindnet", "--all-containers", "--prefix", "--ignore-errors") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: kindnet container(s) logs (current):\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "logs", "--namespace=kube-system", "--selector=app=kindnet", "--all-containers", "--prefix", "--ignore-errors", "--previous") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: kindnet container(s) logs (previous):\n%s\n", out)) + } + + cmd = exec.Command("kubectl", "--context", profile, "describe", "ds", "-n", "kube-system", "--selector=k8s-app=kube-proxy") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: describe kube-proxy daemon set:\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "describe", "pods", "-n", "kube-system", "--selector=k8s-app=kube-proxy") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: describe kube-proxy pod(s):\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "logs", "-n", "kube-system", "--selector=k8s-app=kube-proxy", "--tail=-1") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: kube-proxy logs:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo systemctl status kubelet --all --full --no-pager") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: kubelet daemon status:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo systemctl cat kubelet --no-pager") + out, _ = cmd.CombinedOutput() + 
output.WriteString(fmt.Sprintf("\n>>> host: kubelet daemon config:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo journalctl -xeu kubelet --all --full --no-pager") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: kubelet logs:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo cat /etc/kubernetes/kubelet.conf") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: /etc/kubernetes/kubelet.conf:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo cat /var/lib/kubelet/config.yaml") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: /var/lib/kubelet/config.yaml:\n%s\n", out)) + + cmd = exec.Command("kubectl", "config", "view") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: kubectl config:\n%s\n", out)) + + cmd = exec.Command("kubectl", "--context", profile, "get", "cm", "-A", "-oyaml") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> k8s: cms:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo systemctl status docker --all --full --no-pager") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: docker daemon status:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo systemctl cat docker --no-pager") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: docker daemon config:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo cat /etc/docker/daemon.json") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: /etc/docker/daemon.json:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo docker system info") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: docker system info:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo systemctl status cri-docker --all --full --no-pager") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: cri-docker daemon status:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo systemctl cat cri-docker --no-pager") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: cri-docker daemon config:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo cat /etc/systemd/system/cri-docker.service.d/10-cni.conf") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo cat /usr/lib/systemd/system/cri-docker.service") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: /usr/lib/systemd/system/cri-docker.service:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo cri-dockerd --version") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: cri-dockerd version:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo systemctl status containerd --all --full --no-pager") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: containerd daemon status:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo systemctl cat containerd --no-pager") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: containerd daemon config:\n%s\n", out)) + + cmd = exec.Command(Target(), 
"ssh", "-p", profile, "sudo cat /lib/systemd/system/containerd.service") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: /lib/systemd/system/containerd.service:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo cat /etc/containerd/config.toml") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: /etc/containerd/config.toml:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo containerd config dump") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: containerd config dump:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo systemctl status crio --all --full --no-pager") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: crio daemon status:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo systemctl cat crio --no-pager") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: crio daemon config:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \\;") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: /etc/crio:\n%s\n", out)) + + cmd = exec.Command(Target(), "ssh", "-p", profile, "sudo crio config") + out, _ = cmd.CombinedOutput() + output.WriteString(fmt.Sprintf("\n>>> host: crio config:\n%s\n", out)) + + output.WriteString(fmt.Sprintf("----------------------- debugLogs end: %s [took: %v] --------------------------------", profile, time.Since(start))) + t.Logf("\n%s\n", output.String()) +} diff --git a/test/integration/preload_test.go b/test/integration/preload_test.go index 956e4666e6..325d9ba3a5 100644 --- a/test/integration/preload_test.go +++ b/test/integration/preload_test.go @@ -59,11 +59,15 @@ func TestPreload(t *testing.T) { t.Fatalf("%s failed: %v", rr.Command(), err) } - // Restart minikube with v1.24.6, which has a preloaded tarball + // stop the cluster + rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile)) + if err != nil { + t.Fatalf("%s failed: %v", rr.Command(), err) + } + + // re-start the cluster and check if image is preserved startArgs = []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=1", "--wait=true"} startArgs = append(startArgs, StartArgs()...) 
- k8sVersion = "v1.24.6" - startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", k8sVersion)) rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { t.Fatalf("%s failed: %v", rr.Command(), err) @@ -78,6 +82,6 @@ func TestPreload(t *testing.T) { t.Fatalf("%s failed: %v", rr.Command(), err) } if !strings.Contains(rr.Output(), image) { - t.Fatalf("Expected to find %s in output of `docker images`, instead got %s", image, rr.Output()) + t.Fatalf("Expected to find %s in image list output, instead got %s", image, rr.Output()) } } diff --git a/test/integration/start_stop_delete_test.go b/test/integration/start_stop_delete_test.go index c30a07f6b8..e8435c3e3d 100644 --- a/test/integration/start_stop_delete_test.go +++ b/test/integration/start_stop_delete_test.go @@ -61,7 +61,7 @@ func TestStartStop(t *testing.T) { "--feature-gates", "ServerSideApply=true", "--network-plugin=cni", - "--extra-config=kubeadm.pod-network-cidr=192.168.111.111/16", + "--extra-config=kubeadm.pod-network-cidr=10.42.0.0/16", }}, {"default-k8s-diff-port", constants.DefaultKubernetesVersion, []string{ "--apiserver-port=8444", diff --git a/test/integration/testdata/kube-flannel.yaml b/test/integration/testdata/kube-flannel.yaml index 833fd58183..0bc0d084b8 100644 --- a/test/integration/testdata/kube-flannel.yaml +++ b/test/integration/testdata/kube-flannel.yaml @@ -2,7 +2,7 @@ kind: Namespace apiVersion: v1 metadata: - name: kube-system + name: kube-flannel labels: pod-security.kubernetes.io/enforce: privileged --- @@ -22,6 +22,7 @@ rules: resources: - nodes verbs: + - get - list - watch - apiGroups: @@ -42,19 +43,19 @@ roleRef: subjects: - kind: ServiceAccount name: flannel - namespace: kube-system + namespace: kube-flannel --- apiVersion: v1 kind: ServiceAccount metadata: name: flannel - namespace: kube-system + namespace: kube-flannel --- kind: ConfigMap apiVersion: v1 metadata: name: kube-flannel-cfg - namespace: kube-system + namespace: kube-flannel labels: tier: node app: flannel @@ -91,7 +92,7 @@ apiVersion: apps/v1 kind: DaemonSet metadata: name: kube-flannel-ds - namespace: kube-system + namespace: kube-flannel labels: tier: node app: flannel @@ -122,8 +123,8 @@ spec: serviceAccountName: flannel initContainers: - name: install-cni-plugin - #image: flannelcni/flannel-cni-plugin:v1.0.1 for ppc64le and mips64le (dockerhub limitations may apply) - image: rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1 + #image: flannelcni/flannel-cni-plugin:v1.1.0 for ppc64le and mips64le (dockerhub limitations may apply) + image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0 command: - cp args: @@ -134,8 +135,8 @@ spec: - name: cni-plugin mountPath: /opt/cni/bin - name: install-cni - #image: flannelcni/flannel:v0.17.0 for ppc64le and mips64le (dockerhub limitations may apply) - image: rancher/mirrored-flannelcni-flannel:v0.17.0 + #image: flannelcni/flannel:v0.20.2 for ppc64le and mips64le (dockerhub limitations may apply) + image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2 command: - cp args: @@ -149,8 +150,8 @@ spec: mountPath: /etc/kube-flannel/ containers: - name: kube-flannel - #image: flannelcni/flannel:v0.17.0 for ppc64le and mips64le (dockerhub limitations may apply) - image: rancher/mirrored-flannelcni-flannel:v0.17.0 + #image: flannelcni/flannel:v0.20.2 for ppc64le and mips64le (dockerhub limitations may apply) + image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2 command: - /opt/bin/flanneld args: diff --git 
diff --git a/test/integration/testdata/kube-flannel.yaml b/test/integration/testdata/kube-flannel.yaml
index 833fd58183..0bc0d084b8 100644
--- a/test/integration/testdata/kube-flannel.yaml
+++ b/test/integration/testdata/kube-flannel.yaml
@@ -2,7 +2,7 @@
 kind: Namespace
 apiVersion: v1
 metadata:
-  name: kube-system
+  name: kube-flannel
   labels:
     pod-security.kubernetes.io/enforce: privileged
 ---
@@ -22,6 +22,7 @@ rules:
   resources:
   - nodes
   verbs:
+  - get
   - list
   - watch
 - apiGroups:
@@ -42,19 +43,19 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: flannel
-  namespace: kube-system
+  namespace: kube-flannel
 ---
 apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: flannel
-  namespace: kube-system
+  namespace: kube-flannel
 ---
 kind: ConfigMap
 apiVersion: v1
 metadata:
   name: kube-flannel-cfg
-  namespace: kube-system
+  namespace: kube-flannel
   labels:
     tier: node
     app: flannel
@@ -91,7 +92,7 @@ apiVersion: apps/v1
 kind: DaemonSet
 metadata:
   name: kube-flannel-ds
-  namespace: kube-system
+  namespace: kube-flannel
   labels:
     tier: node
     app: flannel
@@ -122,8 +123,8 @@ spec:
       serviceAccountName: flannel
       initContainers:
       - name: install-cni-plugin
-        #image: flannelcni/flannel-cni-plugin:v1.0.1 for ppc64le and mips64le (dockerhub limitations may apply)
-        image: rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1
+        #image: flannelcni/flannel-cni-plugin:v1.1.0 for ppc64le and mips64le (dockerhub limitations may apply)
+        image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
        command:
        - cp
        args:
@@ -134,8 +135,8 @@ spec:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
-        #image: flannelcni/flannel:v0.17.0 for ppc64le and mips64le (dockerhub limitations may apply)
-        image: rancher/mirrored-flannelcni-flannel:v0.17.0
+        #image: flannelcni/flannel:v0.20.2 for ppc64le and mips64le (dockerhub limitations may apply)
+        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2
        command:
        - cp
        args:
@@ -149,8 +150,8 @@ spec:
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
-        #image: flannelcni/flannel:v0.17.0 for ppc64le and mips64le (dockerhub limitations may apply)
-        image: rancher/mirrored-flannelcni-flannel:v0.17.0
+        #image: flannelcni/flannel:v0.20.2 for ppc64le and mips64le (dockerhub limitations may apply)
+        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2
        command:
        - /opt/bin/flanneld
        args:
diff --git a/test/integration/testdata/netcat-deployment-nomaster.yaml b/test/integration/testdata/netcat-deployment-nomaster.yaml
index 6bc8f37f73..607c65af12 100644
--- a/test/integration/testdata/netcat-deployment-nomaster.yaml
+++ b/test/integration/testdata/netcat-deployment-nomaster.yaml
@@ -17,7 +17,8 @@ spec:
       containers:
         # dnsutils is easier to debug DNS issues with than the standard busybox image
         - name: dnsutils
-          image: k8s.gcr.io/e2e-test-images/agnhost:2.32
+          # https://github.com/kubernetes/kubernetes/blob/master/test/images/agnhost/README.md
+          image: registry.k8s.io/e2e-test-images/agnhost:2.40
           command: ["/bin/sh", "-c", "while true; do echo hello | nc -l -p 8080; done"]
 
       affinity:
diff --git a/test/integration/testdata/netcat-deployment.yaml b/test/integration/testdata/netcat-deployment.yaml
index 48930cb55d..dd1e1a0c43 100644
--- a/test/integration/testdata/netcat-deployment.yaml
+++ b/test/integration/testdata/netcat-deployment.yaml
@@ -17,7 +17,9 @@ spec:
       containers:
         # dnsutils is easier to debug DNS issues with than the standard busybox image
         - name: dnsutils
-          image: k8s.gcr.io/e2e-test-images/agnhost:2.32
+          # https://github.com/kubernetes/kubernetes/blob/master/test/images/agnhost/README.md
+          image: registry.k8s.io/e2e-test-images/agnhost:2.40
+          imagePullPolicy: IfNotPresent
           command: ["/bin/sh", "-c", "while true; do echo hello | nc -l -p 8080; done"]
 
---
diff --git a/test/integration/util_test.go b/test/integration/util_test.go
index ab8421fbb1..b65f8b58cf 100644
--- a/test/integration/util_test.go
+++ b/test/integration/util_test.go
@@ -35,7 +35,7 @@ func UniqueProfileName(prefix string) string {
 		return "minikube"
 	}
 	// example: prefix-162239
-	return fmt.Sprintf("%s-%s", prefix, time.Now().Format("150405"))
+	return fmt.Sprintf("%s-%s", prefix, fmt.Sprintf("%06d", time.Now().UnixNano()%1000000))
 }
 
 // auditContains checks if the provided string is contained within the logs.
diff --git a/test/integration/version_upgrade_test.go b/test/integration/version_upgrade_test.go
index 8187f58411..079e0eb779 100644
--- a/test/integration/version_upgrade_test.go
+++ b/test/integration/version_upgrade_test.go
@@ -71,8 +71,9 @@ func legacyVersion() string {
 	}
 	// the version containerd in ISO was upgraded to 1.4.2
 	// we need it to use runc.v2 plugin
+	// note: Test*BinaryUpgrade require minikube v1.22+ to satisfy newer containerd config structure
 	if ContainerRuntime() == "containerd" {
-		version = "v1.16.0"
+		version = "v1.22.0"
 	}
 	return version
 }
@@ -120,8 +121,8 @@ func TestRunningBinaryUpgrade(t *testing.T) {
 	if err != nil {
 		t.Fatalf("failed to create temp file for legacy kubeconfig %v", err)
 	}
-	defer os.Remove(legacyKubeConfig.Name()) // clean up
+
 	legacyEnv = append(legacyEnv, fmt.Sprintf("KUBECONFIG=%s", legacyKubeConfig.Name()))
 	c.Env = legacyEnv
 	rr, err = Run(t, c)