Merge pull request #13251 from afbjorklund/default-runtime

Make the default container runtime dynamic
Sharif Elgamal 2022-02-02 18:02:05 -08:00 committed by GitHub
commit 423b816311
15 changed files with 89 additions and 27 deletions


@@ -280,7 +280,7 @@ var dockerEnvCmd = &cobra.Command{
 			exit.Message(reason.EnvMultiConflict, `The docker-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/`)
 		}
-		if co.Config.KubernetesConfig.ContainerRuntime != "docker" {
+		if co.Config.KubernetesConfig.ContainerRuntime != constants.Docker {
 			exit.Message(reason.Usage, `The docker-env command is only compatible with the "docker" runtime, but this cluster was configured to use the "{{.runtime}}" runtime.`,
 				out.V{"runtime": co.Config.KubernetesConfig.ContainerRuntime})
 		}


@@ -170,6 +170,11 @@ var podmanEnvCmd = &cobra.Command{
 			exit.Message(reason.Usage, `The podman-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/`)
 		}
+		if co.Config.KubernetesConfig.ContainerRuntime != constants.CRIO {
+			exit.Message(reason.Usage, `The podman-env command is only compatible with the "crio" runtime, but this cluster was configured to use the "{{.runtime}}" runtime.`,
+				out.V{"runtime": co.Config.KubernetesConfig.ContainerRuntime})
+		}
+
 		r := co.CP.Runner
 		if ok := isPodmanAvailable(r); !ok {
 			exit.Message(reason.EnvPodmanUnavailable, `The podman service within '{{.cluster}}' is not active`, out.V{"cluster": cname})


@@ -188,6 +188,7 @@ func runStart(cmd *cobra.Command, args []string) {
 		validateSpecifiedDriver(existing)
 		validateKubernetesVersion(existing)
+		validateContainerRuntime(existing)

 		ds, alts, specified := selectDriver(existing)
 		if cmd.Flag(kicBaseImage).Changed {
@@ -270,7 +271,7 @@ func runStart(cmd *cobra.Command, args []string) {
 			exit.Error(reason.GuestStart, "failed to start node", err)
 		}
-		if err := showKubectlInfo(kubeconfig, starter.Node.KubernetesVersion, starter.Cfg.Name); err != nil {
+		if err := showKubectlInfo(kubeconfig, starter.Node.KubernetesVersion, starter.Node.ContainerRuntime, starter.Cfg.Name); err != nil {
 			klog.Errorf("kubectl info: %v", err)
 		}
 	}
@@ -302,7 +303,8 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing *
 	}

 	k8sVersion := getKubernetesVersion(existing)
-	cc, n, err := generateClusterConfig(cmd, existing, k8sVersion, driverName)
+	rtime := getContainerRuntime(existing)
+	cc, n, err := generateClusterConfig(cmd, existing, k8sVersion, rtime, driverName)
 	if err != nil {
 		return node.Starter{}, errors.Wrap(err, "Failed to generate config")
 	}
@@ -381,6 +383,7 @@ func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config.
 			Worker:            true,
 			ControlPlane:      false,
 			KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion,
+			ContainerRuntime:  starter.Cfg.KubernetesConfig.ContainerRuntime,
 		}
 		out.Ln("") // extra newline for clarity on the command line
 		err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure))
@@ -439,15 +442,15 @@ func displayEnviron(env []string) {
 	}
 }

-func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion, machineName string) error {
+func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion, rtime, machineName string) error {
 	if k8sVersion == constants.NoKubernetesVersion {
 		register.Reg.SetStep(register.Done)
 		out.Step(style.Ready, "Done! minikube is ready without Kubernetes!")

 		// Runtime message.
 		boxConfig := box.Config{Py: 1, Px: 4, Type: "Round", Color: "Green"}
-		switch viper.GetString(containerRuntime) {
-		case constants.DefaultContainerRuntime:
+		switch rtime {
+		case constants.Docker:
 			out.BoxedWithConfig(boxConfig, style.Tip, "Things to try without Kubernetes ...", `- "minikube ssh" to SSH into minikube's node.
 - "minikube docker-env" to point your docker-cli to the docker inside minikube.
 - "minikube image" to build images without docker.`)
@@ -1183,9 +1186,10 @@ func validateFlags(cmd *cobra.Command, drvName string) {
 			exit.Message(reason.DrvUnsupportedProfile, "The '{{.name}} driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/", out.V{"name": drvName})
 		}

-		runtime := viper.GetString(containerRuntime)
-		if runtime != "docker" {
-			out.WarningT("Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!", out.V{"runtime": runtime})
+		// default container runtime varies, starting with Kubernetes 1.24 - assume that only the default container runtime has been tested
+		rtime := viper.GetString(containerRuntime)
+		if rtime != constants.DefaultContainerRuntime && rtime != defaultRuntime(getKubernetesVersion(nil)) {
+			out.WarningT("Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!", out.V{"runtime": rtime})
 		}

 		// conntrack is required starting with Kubernetes 1.18, include the release candidates for completion
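A note on the new warning predicate: with the sentinel default it fires only when the user explicitly picked a runtime that also differs from the computed default. A minimal, self-contained illustration, assuming `defaultRuntime(...)` returns "docker" as the stub introduced later in this diff does:

```go
package main

import "fmt"

func main() {
	const defaultSentinel = "" // constants.DefaultContainerRuntime after this change
	for _, rt := range []string{"", "docker", "containerd", "crio"} {
		warn := rt != defaultSentinel && rt != "docker" // rt != defaultRuntime(...)
		fmt.Printf("--container-runtime=%q -> warn=%v\n", rt, warn)
	}
	// only "containerd" and "crio" trigger the untested-configuration warning
}
```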
@@ -1285,6 +1289,10 @@ func validateRuntime(rtime string) error {
 	// `crio` is accepted as an alternative spelling to `cri-o`
 	validOptions = append(validOptions, constants.CRIO)

+	if rtime == constants.DefaultContainerRuntime {
+		return nil
+	}
+
 	var validRuntime bool
 	for _, option := range validOptions {
 		if rtime == option {
@@ -1308,9 +1316,31 @@ func validateRuntime(rtime string) error {
 	return nil
 }

+func getContainerRuntime(old *config.ClusterConfig) string {
+	paramRuntime := viper.GetString(containerRuntime)
+
+	// try to load the old version first if the user didn't specify anything
+	if paramRuntime == constants.DefaultContainerRuntime && old != nil {
+		paramRuntime = old.KubernetesConfig.ContainerRuntime
+	}
+
+	if paramRuntime == constants.DefaultContainerRuntime {
+		k8sVersion := getKubernetesVersion(old)
+		paramRuntime = defaultRuntime(k8sVersion)
+	}
+
+	return paramRuntime
+}
+
+// defaultRuntime returns the default container runtime
+func defaultRuntime(k8sVersion string) string {
+	// minikube default
+	return constants.Docker
+}
+
 // if container runtime is not docker, check that cni is not disabled
 func validateCNI(cmd *cobra.Command, runtime string) {
-	if runtime == "docker" {
+	if runtime == constants.Docker {
 		return
 	}

 	if cmd.Flags().Changed(cniFlag) && strings.ToLower(viper.GetString(cniFlag)) == "false" {
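The resolution order in `getContainerRuntime` above is: an explicit `--container-runtime` flag wins; otherwise an existing cluster keeps its stored runtime; only then does `defaultRuntime` supply the fallback. `defaultRuntime` is deliberately a stub that still returns `constants.Docker`, but it already receives the Kubernetes version so the default can later vary with it, as the validateFlags comment about Kubernetes 1.24 hints. A hypothetical sketch of such a version-aware default, not part of this PR, using the blang/semver package that minikube already vendors:

```go
package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

// defaultRuntime, as it might look once the default flips for newer
// Kubernetes releases (assumption: containerd from 1.24 on).
func defaultRuntime(k8sVersion string) string {
	v, err := semver.ParseTolerant(k8sVersion) // tolerates a leading "v"
	if err != nil || v.LT(semver.MustParse("1.24.0")) {
		return "docker" // constants.Docker
	}
	return "containerd" // constants.Containerd
}

func main() {
	fmt.Println(defaultRuntime("v1.23.1")) // docker
	fmt.Println(defaultRuntime("v1.24.0")) // containerd
}
```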
@@ -1458,6 +1488,7 @@ func createNode(cc config.ClusterConfig, kubeNodeName string, existing *config.C
 	if existing != nil {
 		cp, err := config.PrimaryControlPlane(existing)
 		cp.KubernetesVersion = getKubernetesVersion(&cc)
+		cp.ContainerRuntime = getContainerRuntime(&cc)
 		if err != nil {
 			return cc, config.Node{}, err
 		}
@@ -1467,6 +1498,7 @@ func createNode(cc config.ClusterConfig, kubeNodeName string, existing *config.C
 		nodes := []config.Node{}
 		for _, n := range existing.Nodes {
 			n.KubernetesVersion = getKubernetesVersion(&cc)
+			n.ContainerRuntime = getContainerRuntime(&cc)
 			nodes = append(nodes, n)
 		}
 		cc.Nodes = nodes
@@ -1477,6 +1509,7 @@ func createNode(cc config.ClusterConfig, kubeNodeName string, existing *config.C
 	cp := config.Node{
 		Port:              cc.KubernetesConfig.NodePort,
 		KubernetesVersion: getKubernetesVersion(&cc),
+		ContainerRuntime:  getContainerRuntime(&cc),
 		Name:              kubeNodeName,
 		ControlPlane:      true,
 		Worker:            true,
@@ -1573,6 +1606,17 @@ func validateKubernetesVersion(old *config.ClusterConfig) {
 	}
 }

+// validateContainerRuntime ensures that the container runtime is reasonable
+func validateContainerRuntime(old *config.ClusterConfig) {
+	if old == nil || old.KubernetesConfig.ContainerRuntime == "" {
+		return
+	}
+
+	if err := validateRuntime(old.KubernetesConfig.ContainerRuntime); err != nil {
+		klog.Errorf("Error parsing old runtime %q: %v", old.KubernetesConfig.ContainerRuntime, err)
+	}
+}
+
 func isBaseImageApplicable(drv string) bool {
 	return registry.IsKIC(drv)
 }


@@ -160,7 +160,7 @@ func initMinikubeFlags() {
 	startCmd.Flags().String(kicBaseImage, kic.BaseImage, "The base image to use for docker/podman drivers. Intended for local development.")
 	startCmd.Flags().Bool(keepContext, false, "This will keep the existing kubectl context and will create a minikube context.")
 	startCmd.Flags().Bool(embedCerts, false, "if true, will embed the certs in kubeconfig.")
-	startCmd.Flags().String(containerRuntime, constants.DefaultContainerRuntime, fmt.Sprintf("The container runtime to be used (%s).", strings.Join(cruntime.ValidRuntimes(), ", ")))
+	startCmd.Flags().String(containerRuntime, constants.DefaultContainerRuntime, fmt.Sprintf("The container runtime to be used. Valid options: %s (default: auto)", strings.Join(cruntime.ValidRuntimes(), ", ")))
 	startCmd.Flags().Bool(createMount, false, "This will start the mount daemon and automatically mount files into minikube.")
 	startCmd.Flags().String(mountString, constants.DefaultMountDir+":/minikube-host", "The argument to pass the minikube mount command on start.")
 	startCmd.Flags().String(mount9PVersion, defaultMount9PVersion, mount9PVersionDescription)
@@ -274,7 +274,7 @@ func ClusterFlagValue() string {
 }

 // generateClusterConfig generate a config.ClusterConfig based on flags or existing cluster config
-func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k8sVersion string, drvName string) (config.ClusterConfig, config.Node, error) {
+func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k8sVersion string, rtime string, drvName string) (config.ClusterConfig, config.Node, error) {
 	var cc config.ClusterConfig
 	if existing != nil {
 		cc = updateExistingConfigFromFlags(cmd, existing)
@@ -286,7 +286,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
 		}
 	} else {
 		klog.Info("no existing cluster config was found, will generate one from the flags ")
-		cc = generateNewConfigFromFlags(cmd, k8sVersion, drvName)
+		cc = generateNewConfigFromFlags(cmd, k8sVersion, rtime, drvName)

 		cnm, err := cni.New(&cc)
 		if err != nil {
@@ -444,7 +444,7 @@ func getCNIConfig(cmd *cobra.Command) string {
 }

 // generateNewConfigFromFlags generate a config.ClusterConfig based on flags
-func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) config.ClusterConfig {
+func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, rtime string, drvName string) config.ClusterConfig {
 	var cc config.ClusterConfig

 	// networkPlugin cni deprecation warning
@@ -526,7 +526,7 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName s
 			APIServerIPs:           apiServerIPs,
 			DNSDomain:              viper.GetString(dnsDomain),
 			FeatureGates:           viper.GetString(featureGates),
-			ContainerRuntime:       viper.GetString(containerRuntime),
+			ContainerRuntime:       rtime,
 			CRISocket:              viper.GetString(criSocket),
 			NetworkPlugin:          chosenNetworkPlugin,
 			ServiceCIDR:            viper.GetString(serviceCIDR),
@@ -549,7 +549,7 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName s
 		exit.Message(reason.Usage, "Ensure your {{.driver_name}} is running and is healthy.", out.V{"driver_name": driver.FullName(drvName)})
 	}
 	if si.Rootless {
-		if cc.KubernetesConfig.ContainerRuntime == "docker" {
+		if cc.KubernetesConfig.ContainerRuntime == constants.Docker {
 			exit.Message(reason.Usage, "--container-runtime must be set to \"containerd\" or \"cri-o\" for rootless")
 		}
 		// KubeletInUserNamespace feature gate is essential for rootless driver.
@@ -729,6 +729,9 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC
 	if cmd.Flags().Changed(kubernetesVersion) {
 		cc.KubernetesConfig.KubernetesVersion = getKubernetesVersion(existing)
 	}
+	if cmd.Flags().Changed(containerRuntime) {
+		cc.KubernetesConfig.ContainerRuntime = getContainerRuntime(existing)
+	}
 	if cmd.Flags().Changed("extra-config") {
 		cc.KubernetesConfig.ExtraOptions = getExtraOptions()
 	}


@@ -112,6 +112,7 @@ func TestMirrorCountry(t *testing.T) {
 	viper.SetDefault(humanReadableDiskSize, defaultDiskSize)
 	checkRepository = checkRepoMock
 	k8sVersion := constants.DefaultKubernetesVersion
+	rtime := constants.DefaultContainerRuntime
 	var tests = []struct {
 		description     string
 		k8sVersion      string
@@ -157,7 +158,7 @@ func TestMirrorCountry(t *testing.T) {
 			viper.SetDefault(imageRepository, test.imageRepository)
 			viper.SetDefault(imageMirrorCountry, test.mirrorCountry)
 			viper.SetDefault(kvmNUMACount, 1)
-			config, _, err := generateClusterConfig(cmd, nil, k8sVersion, driver.Mock)
+			config, _, err := generateClusterConfig(cmd, nil, k8sVersion, rtime, driver.Mock)
 			if err != nil {
 				t.Fatalf("Got unexpected error %v during config generation", err)
 			}
@@ -179,6 +180,7 @@ func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
 		}
 	}()
 	k8sVersion := constants.NewestKubernetesVersion
+	rtime := constants.DefaultContainerRuntime
 	var tests = []struct {
 		description string
 		proxy       string
@@ -226,7 +228,7 @@ func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
 			cfg.DockerEnv = []string{} // clear docker env to avoid pollution
 			proxy.SetDockerEnv()

-			config, _, err := generateClusterConfig(cmd, nil, k8sVersion, "none")
+			config, _, err := generateClusterConfig(cmd, nil, k8sVersion, rtime, "none")
 			if err != nil {
 				t.Fatalf("Got unexpected error %v during config generation", err)
 			}


@@ -155,7 +155,7 @@ func chooseDefault(cc config.ClusterConfig) Manager {
 		return KindNet{cc: cc}
 	}

-	if cc.KubernetesConfig.ContainerRuntime != "docker" {
+	if cc.KubernetesConfig.ContainerRuntime != constants.Docker {
 		if driver.IsKIC(cc.Driver) {
 			klog.Infof("%q driver + %s runtime found, recommending kindnet", cc.Driver, cc.KubernetesConfig.ContainerRuntime)
 			return KindNet{cc: cc}


@@ -19,6 +19,7 @@ package cni
 import (
 	"k8s.io/klog/v2"
 	"k8s.io/minikube/pkg/minikube/config"
+	"k8s.io/minikube/pkg/minikube/constants"
 	"k8s.io/minikube/pkg/minikube/driver"
 )
@@ -34,7 +35,7 @@ func (c Disabled) String() string {

 // Apply enables the CNI
 func (c Disabled) Apply(r Runner) error {
-	if driver.IsKIC(c.cc.Driver) && c.cc.KubernetesConfig.ContainerRuntime != "docker" {
+	if driver.IsKIC(c.cc.Driver) && c.cc.KubernetesConfig.ContainerRuntime != constants.Docker {
 		klog.Warningf("CNI is recommended for %q driver and %q runtime - expect networking issues", c.cc.Driver, c.cc.KubernetesConfig.ContainerRuntime)
 	}


@@ -63,6 +63,7 @@ func PrimaryControlPlane(cc *ClusterConfig) (Node, error) {
 		IP:                cc.KubernetesConfig.NodeIP,
 		Port:              cc.KubernetesConfig.NodePort,
 		KubernetesVersion: cc.KubernetesConfig.KubernetesVersion,
+		ContainerRuntime:  cc.KubernetesConfig.ContainerRuntime,
 		ControlPlane:      true,
 		Worker:            true,
 	}

View File

@@ -136,6 +136,7 @@ type Node struct {
 	IP                string
 	Port              int
 	KubernetesVersion string
+	ContainerRuntime  string
 	ControlPlane      bool
 	Worker            bool
 }
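With the new field, the runtime chosen for a cluster is recorded per node in the profile config instead of being re-derived from a global flag on every start. A standalone sketch, with a local mirror of the struct and hypothetical values, showing how a node entry would now serialize:

```go
package main

import (
	"encoding/json"
	"os"
)

// a local mirror of the fields above, for illustration only
type Node struct {
	Name              string
	IP                string
	Port              int
	KubernetesVersion string
	ContainerRuntime  string
	ControlPlane      bool
	Worker            bool
}

func main() {
	// hypothetical worker node as it would now be stored in the profile config
	n := Node{Name: "m02", Port: 8443, KubernetesVersion: "v1.23.1",
		ContainerRuntime: "containerd", Worker: true}
	json.NewEncoder(os.Stdout).Encode(n) // the runtime now round-trips with the node
}
```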


@@ -60,8 +60,10 @@ const (
 	Containerd = "containerd"
 	// CRIO is the default name and spelling for the cri-o container runtime
 	CRIO = "crio"
+	// Docker is the default name and spelling for the docker container runtime
+	Docker = "docker"
 	// DefaultContainerRuntime is our default container runtime
-	DefaultContainerRuntime = "docker"
+	DefaultContainerRuntime = ""
 	// APIServerName is the default API server name
 	APIServerName = "minikubeCA"


@@ -94,7 +94,7 @@ func testPreloadDownloadPreventsMultipleDownload(t *testing.T) {
 	var group sync.WaitGroup
 	group.Add(2)
 	dlCall := func() {
-		if err := Preload(constants.DefaultKubernetesVersion, constants.DefaultContainerRuntime, "docker"); err != nil {
+		if err := Preload(constants.DefaultKubernetesVersion, constants.Docker, "docker"); err != nil {
 			t.Logf("Failed to download preload: %+v (may be ok)", err)
 		}
 		group.Done()
@@ -119,7 +119,7 @@ func testPreloadNotExists(t *testing.T) {
 	getChecksum = func(k8sVersion, containerRuntime string) ([]byte, error) { return []byte("check"), nil }
 	ensureChecksumValid = func(k8sVersion, containerRuntime, path string, checksum []byte) error { return nil }

-	err := Preload(constants.DefaultKubernetesVersion, constants.DefaultContainerRuntime, "docker")
+	err := Preload(constants.DefaultKubernetesVersion, constants.Docker, "docker")
 	if err != nil {
 		t.Errorf("Expected no error when preload exists")
 	}
@@ -140,7 +140,7 @@ func testPreloadChecksumMismatch(t *testing.T) {
 		return fmt.Errorf("checksum mismatch")
 	}

-	err := Preload(constants.DefaultKubernetesVersion, constants.DefaultContainerRuntime, "docker")
+	err := Preload(constants.DefaultKubernetesVersion, constants.Docker, "docker")
 	expectedErrMsg := "checksum mismatch"
 	if err == nil {
 		t.Errorf("Expected error when checksum mismatches")


@@ -31,7 +31,7 @@ minikube start [flags]
       --cache-images                   If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none. (default true)
       --cert-expiration duration       Duration until minikube certificate expiration, defaults to three years (26280h). (default 26280h0m0s)
       --cni string                     CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)
-      --container-runtime string       The container runtime to be used (docker, cri-o, containerd). (default "docker")
+      --container-runtime string       The container runtime to be used. Valid options: docker, cri-o, containerd (default: auto)
       --cpus string                    Number of CPUs allocated to Kubernetes. Use "max" to use the maximum number of CPUs. (default "2")
       --cri-socket string              The cri socket path to be used.
       --delete-on-failure              If set, delete the current cluster if start fails and try again. Defaults to false.


@@ -89,7 +89,7 @@ minikube start --extra-config=kubeadm.ignore-preflight-errors=SystemVerification

 ## Runtime configuration

-The default container runtime in minikube is Docker. You can select it explicitly by using:
+The default container runtime in minikube varies. You can select one explicitly by using:

 ```shell
 minikube start --container-runtime=docker
@@ -100,6 +100,8 @@ Other options available are:
 * [containerd](https://github.com/containerd/containerd)
 * [cri-o](https://github.com/cri-o/cri-o)

+See <https://kubernetes.io/docs/setup/production-environment/container-runtimes/>
+
 ## Environment variables

 minikube supports passing environment variables instead of flags for every value listed in `minikube config`. This is done by passing an environment variable with the prefix `MINIKUBE_`.


@@ -162,7 +162,7 @@ func ContainerRuntime() string {
 			return strings.TrimPrefix(s, flag)
 		}
 	}
-	return constants.DefaultContainerRuntime
+	return constants.Docker
 }

 // arm64Platform returns true if running on arm64/* platform


@@ -650,6 +650,7 @@
 	"The path on the file system where the testing docs in markdown need to be saved": "",
 	"The podman service within '{{.cluster}}' is not active": "",
 	"The podman-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "",
+	"The podman-env command is only compatible with the \"crio\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "",
 	"The requested memory allocation of {{.requested}}MiB does not leave room for system overhead (total system memory: {{.system_limit}}MiB). You may face stability issues.": "",
 	"The service namespace": "",
 	"The service/ingress {{.resource}} requires privileged ports to be exposed: {{.ports}}": "",