Make the default container runtime dynamic
Since the dockershim removal, there is no longer a constant default container runtime provided by upstream. Only CRI.

pull/13251/head
parent 25d17c2dde
commit c4800a6115
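The mechanism in the diff below: the `--container-runtime` flag now defaults to an empty "auto" sentinel, and `getContainerRuntime` resolves it at start time, so the default can later vary by Kubernetes version. As a rough, hypothetical sketch of what a version-aware `defaultRuntime` could look like once wired up (the commit itself still returns docker unconditionally; the 1.24/containerd cutoff and the simplified version parsing below are assumptions, not part of this change):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// defaultRuntime is a hypothetical sketch of a version-dependent default.
// The commit below still returns "docker" unconditionally; routing to
// "containerd" for v1.24+ (where dockershim was removed) is an assumption
// about where this is headed.
func defaultRuntime(k8sVersion string) string {
	v := strings.TrimPrefix(k8sVersion, "v")
	parts := strings.SplitN(v, ".", 3)
	if len(parts) < 2 {
		return "docker" // unparsable version: fall back to the old default
	}
	major, err1 := strconv.Atoi(parts[0])
	minor, err2 := strconv.Atoi(parts[1])
	if err1 != nil || err2 != nil {
		return "docker"
	}
	// Kubernetes 1.24 removed dockershim, so a CRI runtime such as
	// containerd becomes the natural default from that release onward.
	if major > 1 || (major == 1 && minor >= 24) {
		return "containerd"
	}
	return "docker"
}

func main() {
	fmt.Println(defaultRuntime("v1.23.3")) // docker
	fmt.Println(defaultRuntime("v1.24.0")) // containerd
}
```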
@@ -280,7 +280,7 @@ var dockerEnvCmd = &cobra.Command{
 		exit.Message(reason.EnvMultiConflict, `The docker-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/`)
 	}
 
-	if co.Config.KubernetesConfig.ContainerRuntime != "docker" {
+	if co.Config.KubernetesConfig.ContainerRuntime != constants.Docker {
 		exit.Message(reason.Usage, `The docker-env command is only compatible with the "docker" runtime, but this cluster was configured to use the "{{.runtime}}" runtime.`,
 			out.V{"runtime": co.Config.KubernetesConfig.ContainerRuntime})
 	}
@@ -170,6 +170,11 @@ var podmanEnvCmd = &cobra.Command{
 		exit.Message(reason.Usage, `The podman-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/`)
 	}
 
+	if co.Config.KubernetesConfig.ContainerRuntime != constants.CRIO {
+		exit.Message(reason.Usage, `The podman-env command is only compatible with the "crio" runtime, but this cluster was configured to use the "{{.runtime}}" runtime.`,
+			out.V{"runtime": co.Config.KubernetesConfig.ContainerRuntime})
+	}
+
 	r := co.CP.Runner
 	if ok := isPodmanAvailable(r); !ok {
 		exit.Message(reason.EnvPodmanUnavailable, `The podman service within '{{.cluster}}' is not active`, out.V{"cluster": cname})
@@ -188,6 +188,7 @@ func runStart(cmd *cobra.Command, args []string) {
 	validateSpecifiedDriver(existing)
 	validateKubernetesVersion(existing)
+	validateContainerRuntime(existing)
 
 	ds, alts, specified := selectDriver(existing)
 	if cmd.Flag(kicBaseImage).Changed {

@@ -270,7 +271,7 @@ func runStart(cmd *cobra.Command, args []string) {
 		exit.Error(reason.GuestStart, "failed to start node", err)
 	}
 
-	if err := showKubectlInfo(kubeconfig, starter.Node.KubernetesVersion, starter.Cfg.Name); err != nil {
+	if err := showKubectlInfo(kubeconfig, starter.Node.KubernetesVersion, starter.Node.ContainerRuntime, starter.Cfg.Name); err != nil {
 		klog.Errorf("kubectl info: %v", err)
 	}
 }

@@ -302,7 +303,8 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing *
 	}
 
 	k8sVersion := getKubernetesVersion(existing)
-	cc, n, err := generateClusterConfig(cmd, existing, k8sVersion, driverName)
+	rtime := getContainerRuntime(existing)
+	cc, n, err := generateClusterConfig(cmd, existing, k8sVersion, rtime, driverName)
 	if err != nil {
 		return node.Starter{}, errors.Wrap(err, "Failed to generate config")
 	}

@@ -381,6 +383,7 @@ func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config.
 			Worker:            true,
 			ControlPlane:      false,
 			KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion,
+			ContainerRuntime:  starter.Cfg.KubernetesConfig.ContainerRuntime,
 		}
 		out.Ln("") // extra newline for clarity on the command line
 		err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure))

@@ -439,15 +442,15 @@ func displayEnviron(env []string) {
 	}
 }
 
-func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion, machineName string) error {
+func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion, rtime, machineName string) error {
 	if k8sVersion == constants.NoKubernetesVersion {
 		register.Reg.SetStep(register.Done)
 		out.Step(style.Ready, "Done! minikube is ready without Kubernetes!")
 
 		// Runtime message.
 		boxConfig := box.Config{Py: 1, Px: 4, Type: "Round", Color: "Green"}
-		switch viper.GetString(containerRuntime) {
-		case constants.DefaultContainerRuntime:
+		switch rtime {
+		case constants.Docker:
 			out.BoxedWithConfig(boxConfig, style.Tip, "Things to try without Kubernetes ...", `- "minikube ssh" to SSH into minikube's node.
 - "minikube docker-env" to point your docker-cli to the docker inside minikube.
 - "minikube image" to build images without docker.`)

@@ -1183,9 +1186,10 @@ func validateFlags(cmd *cobra.Command, drvName string) {
 			exit.Message(reason.DrvUnsupportedProfile, "The '{{.name}} driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/", out.V{"name": drvName})
 		}
 
-		runtime := viper.GetString(containerRuntime)
-		if runtime != "docker" {
-			out.WarningT("Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!", out.V{"runtime": runtime})
+		// default container runtime varies, starting with Kubernetes 1.24 - assume that only the default container runtime has been tested
+		rtime := viper.GetString(containerRuntime)
+		if rtime != constants.DefaultContainerRuntime && rtime != defaultRuntime(getKubernetesVersion(nil)) {
+			out.WarningT("Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!", out.V{"runtime": rtime})
 		}
 
 		// conntrack is required starting with Kubernetes 1.18, include the release candidates for completion

@@ -1285,6 +1289,10 @@ func validateRuntime(rtime string) error {
 	// `crio` is accepted as an alternative spelling to `cri-o`
 	validOptions = append(validOptions, constants.CRIO)
 
+	if rtime == constants.DefaultContainerRuntime {
+		return nil
+	}
+
 	var validRuntime bool
 	for _, option := range validOptions {
 		if rtime == option {

@@ -1308,9 +1316,31 @@ func validateRuntime(rtime string) error {
 	return nil
 }
 
+func getContainerRuntime(old *config.ClusterConfig) string {
+	paramRuntime := viper.GetString(containerRuntime)
+
+	// try to load the old version first if the user didn't specify anything
+	if paramRuntime == constants.DefaultContainerRuntime && old != nil {
+		paramRuntime = old.KubernetesConfig.ContainerRuntime
+	}
+
+	if paramRuntime == constants.DefaultContainerRuntime {
+		k8sVersion := getKubernetesVersion(old)
+		paramRuntime = defaultRuntime(k8sVersion)
+	}
+
+	return paramRuntime
+}
+
+// defaultRuntime returns the default container runtime
+func defaultRuntime(k8sVersion string) string {
+	// minikube default
+	return constants.Docker
+}
+
 // if container runtime is not docker, check that cni is not disabled
 func validateCNI(cmd *cobra.Command, runtime string) {
-	if runtime == "docker" {
+	if runtime == constants.Docker {
 		return
 	}
 	if cmd.Flags().Changed(cniFlag) && strings.ToLower(viper.GetString(cniFlag)) == "false" {

@@ -1458,6 +1488,7 @@ func createNode(cc config.ClusterConfig, kubeNodeName string, existing *config.C
 	if existing != nil {
 		cp, err := config.PrimaryControlPlane(existing)
 		cp.KubernetesVersion = getKubernetesVersion(&cc)
+		cp.ContainerRuntime = getContainerRuntime(&cc)
 		if err != nil {
 			return cc, config.Node{}, err
 		}

@@ -1467,6 +1498,7 @@ func createNode(cc config.ClusterConfig, kubeNodeName string, existing *config.C
 		nodes := []config.Node{}
 		for _, n := range existing.Nodes {
 			n.KubernetesVersion = getKubernetesVersion(&cc)
+			n.ContainerRuntime = getContainerRuntime(&cc)
 			nodes = append(nodes, n)
 		}
 		cc.Nodes = nodes

@@ -1477,6 +1509,7 @@ func createNode(cc config.ClusterConfig, kubeNodeName string, existing *config.C
 	cp := config.Node{
 		Port:              cc.KubernetesConfig.NodePort,
 		KubernetesVersion: getKubernetesVersion(&cc),
+		ContainerRuntime:  getContainerRuntime(&cc),
 		Name:              kubeNodeName,
 		ControlPlane:      true,
 		Worker:            true,

@@ -1573,6 +1606,17 @@ func validateKubernetesVersion(old *config.ClusterConfig) {
 	}
 }
 
+// validateContainerRuntime ensures that the container runtime is reasonable
+func validateContainerRuntime(old *config.ClusterConfig) {
+	if old == nil || old.KubernetesConfig.ContainerRuntime == "" {
+		return
+	}
+
+	if err := validateRuntime(old.KubernetesConfig.ContainerRuntime); err != nil {
+		klog.Errorf("Error parsing old runtime %q: %v", old.KubernetesConfig.ContainerRuntime, err)
+	}
+}
+
 func isBaseImageApplicable(drv string) bool {
 	return registry.IsKIC(drv)
 }
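Summing up the changes above: `getContainerRuntime` resolves the runtime with a clear precedence: the explicit `--container-runtime` flag value first, then the runtime recorded in an existing cluster config, then `defaultRuntime` for the Kubernetes version. A minimal standalone sketch of that precedence (function and argument names here are illustrative, not minikube's API):

```go
package main

import "fmt"

// resolveRuntime mirrors the precedence of getContainerRuntime above: an
// explicit flag wins, then the runtime stored in an existing cluster config,
// then the version-based default. Names are hypothetical.
func resolveRuntime(flagValue, existingValue, k8sVersion string) string {
	if flagValue != "" { // "" is constants.DefaultContainerRuntime, i.e. "auto"
		return flagValue
	}
	if existingValue != "" {
		return existingValue
	}
	// Version-based fallback; the commit hardcodes "docker" here for now.
	_ = k8sVersion
	return "docker"
}

func main() {
	fmt.Println(resolveRuntime("containerd", "docker", "v1.23.0")) // flag wins: containerd
	fmt.Println(resolveRuntime("", "crio", "v1.23.0"))             // existing config: crio
	fmt.Println(resolveRuntime("", "", "v1.23.0"))                 // fallback: docker
}
```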
@@ -158,7 +158,7 @@ func initMinikubeFlags() {
 	startCmd.Flags().String(kicBaseImage, kic.BaseImage, "The base image to use for docker/podman drivers. Intended for local development.")
 	startCmd.Flags().Bool(keepContext, false, "This will keep the existing kubectl context and will create a minikube context.")
 	startCmd.Flags().Bool(embedCerts, false, "if true, will embed the certs in kubeconfig.")
-	startCmd.Flags().String(containerRuntime, constants.DefaultContainerRuntime, fmt.Sprintf("The container runtime to be used (%s).", strings.Join(cruntime.ValidRuntimes(), ", ")))
+	startCmd.Flags().String(containerRuntime, constants.DefaultContainerRuntime, fmt.Sprintf("The container runtime to be used. Valid options: %s (default: auto)", strings.Join(cruntime.ValidRuntimes(), ", ")))
 	startCmd.Flags().Bool(createMount, false, "This will start the mount daemon and automatically mount files into minikube.")
 	startCmd.Flags().String(mountString, constants.DefaultMountDir+":/minikube-host", "The argument to pass the minikube mount command on start.")
 	startCmd.Flags().String(mount9PVersion, defaultMount9PVersion, mount9PVersionDescription)

@@ -270,7 +270,7 @@ func ClusterFlagValue() string {
 }
 
 // generateClusterConfig generate a config.ClusterConfig based on flags or existing cluster config
-func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k8sVersion string, drvName string) (config.ClusterConfig, config.Node, error) {
+func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k8sVersion string, rtime string, drvName string) (config.ClusterConfig, config.Node, error) {
 	var cc config.ClusterConfig
 	if existing != nil {
 		cc = updateExistingConfigFromFlags(cmd, existing)

@@ -282,7 +282,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
 		}
 	} else {
 		klog.Info("no existing cluster config was found, will generate one from the flags ")
-		cc = generateNewConfigFromFlags(cmd, k8sVersion, drvName)
+		cc = generateNewConfigFromFlags(cmd, k8sVersion, rtime, drvName)
 
 		cnm, err := cni.New(&cc)
 		if err != nil {

@@ -419,7 +419,7 @@ func getCNIConfig(cmd *cobra.Command) string {
 }
 
 // generateNewConfigFromFlags generate a config.ClusterConfig based on flags
-func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) config.ClusterConfig {
+func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, rtime string, drvName string) config.ClusterConfig {
 	var cc config.ClusterConfig
 
 	// networkPlugin cni deprecation warning

@@ -499,7 +499,7 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName s
 			APIServerIPs:           apiServerIPs,
 			DNSDomain:              viper.GetString(dnsDomain),
 			FeatureGates:           viper.GetString(featureGates),
-			ContainerRuntime:       viper.GetString(containerRuntime),
+			ContainerRuntime:       rtime,
 			CRISocket:              viper.GetString(criSocket),
 			NetworkPlugin:          chosenNetworkPlugin,
 			ServiceCIDR:            viper.GetString(serviceCIDR),

@@ -533,7 +533,7 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName s
 		exit.Message(reason.Usage, "Ensure your {{.driver_name}} is running and is healthy.", out.V{"driver_name": driver.FullName(drvName)})
 	}
 	if si.Rootless {
-		if cc.KubernetesConfig.ContainerRuntime == "docker" {
+		if cc.KubernetesConfig.ContainerRuntime == constants.Docker {
 			exit.Message(reason.Usage, "--container-runtime must be set to \"containerd\" or \"cri-o\" for rootless")
 		}
 		// KubeletInUserNamespace feature gate is essential for rootless driver.

@@ -711,6 +711,9 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC
 	if cmd.Flags().Changed(kubernetesVersion) {
 		cc.KubernetesConfig.KubernetesVersion = getKubernetesVersion(existing)
 	}
+	if cmd.Flags().Changed(containerRuntime) {
+		cc.KubernetesConfig.ContainerRuntime = getContainerRuntime(existing)
+	}
 
 	if cmd.Flags().Changed("extra-config") {
 		cc.KubernetesConfig.ExtraOptions = config.ExtraOptions
@@ -112,6 +112,7 @@ func TestMirrorCountry(t *testing.T) {
 	viper.SetDefault(humanReadableDiskSize, defaultDiskSize)
 	checkRepository = checkRepoMock
 	k8sVersion := constants.DefaultKubernetesVersion
+	rtime := constants.DefaultContainerRuntime
 	var tests = []struct {
 		description     string
 		k8sVersion      string

@@ -157,7 +158,7 @@ func TestMirrorCountry(t *testing.T) {
 			viper.SetDefault(imageRepository, test.imageRepository)
 			viper.SetDefault(imageMirrorCountry, test.mirrorCountry)
 			viper.SetDefault(kvmNUMACount, 1)
-			config, _, err := generateClusterConfig(cmd, nil, k8sVersion, driver.Mock)
+			config, _, err := generateClusterConfig(cmd, nil, k8sVersion, rtime, driver.Mock)
 			if err != nil {
 				t.Fatalf("Got unexpected error %v during config generation", err)
 			}

@@ -179,6 +180,7 @@ func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
 		}
 	}()
 	k8sVersion := constants.NewestKubernetesVersion
+	rtime := constants.DefaultContainerRuntime
 	var tests = []struct {
 		description string
 		proxy       string

@@ -226,7 +228,7 @@ func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
 
 			cfg.DockerEnv = []string{} // clear docker env to avoid pollution
 			proxy.SetDockerEnv()
-			config, _, err := generateClusterConfig(cmd, nil, k8sVersion, "none")
+			config, _, err := generateClusterConfig(cmd, nil, k8sVersion, rtime, "none")
 			if err != nil {
 				t.Fatalf("Got unexpected error %v during config generation", err)
 			}
@@ -155,7 +155,7 @@ func chooseDefault(cc config.ClusterConfig) Manager {
 		return KindNet{cc: cc}
 	}
 
-	if cc.KubernetesConfig.ContainerRuntime != "docker" {
+	if cc.KubernetesConfig.ContainerRuntime != constants.Docker {
 		if driver.IsKIC(cc.Driver) {
 			klog.Infof("%q driver + %s runtime found, recommending kindnet", cc.Driver, cc.KubernetesConfig.ContainerRuntime)
 			return KindNet{cc: cc}
@@ -19,6 +19,7 @@ package cni
 import (
 	"k8s.io/klog/v2"
 	"k8s.io/minikube/pkg/minikube/config"
+	"k8s.io/minikube/pkg/minikube/constants"
 	"k8s.io/minikube/pkg/minikube/driver"
 )
 

@@ -34,7 +35,7 @@ func (c Disabled) String() string {
 
 // Apply enables the CNI
 func (c Disabled) Apply(r Runner) error {
-	if driver.IsKIC(c.cc.Driver) && c.cc.KubernetesConfig.ContainerRuntime != "docker" {
+	if driver.IsKIC(c.cc.Driver) && c.cc.KubernetesConfig.ContainerRuntime != constants.Docker {
 		klog.Warningf("CNI is recommended for %q driver and %q runtime - expect networking issues", c.cc.Driver, c.cc.KubernetesConfig.ContainerRuntime)
 	}
 
@@ -63,6 +63,7 @@ func PrimaryControlPlane(cc *ClusterConfig) (Node, error) {
 		IP:                cc.KubernetesConfig.NodeIP,
 		Port:              cc.KubernetesConfig.NodePort,
 		KubernetesVersion: cc.KubernetesConfig.KubernetesVersion,
+		ContainerRuntime:  cc.KubernetesConfig.ContainerRuntime,
 		ControlPlane:      true,
 		Worker:            true,
 	}
@@ -134,6 +134,7 @@ type Node struct {
 	IP                string
 	Port              int
 	KubernetesVersion string
+	ContainerRuntime  string
 	ControlPlane      bool
 	Worker            bool
 }
@@ -60,8 +60,10 @@
 	Containerd = "containerd"
 	// CRIO is the default name and spelling for the cri-o container runtime
 	CRIO = "crio"
+	// Docker is the default name and spelling for the docker container runtime
+	Docker = "docker"
 	// DefaultContainerRuntime is our default container runtime
-	DefaultContainerRuntime = "docker"
+	DefaultContainerRuntime = ""
 
 	// APIServerName is the default API server name
 	APIServerName = "minikubeCA"
@@ -94,7 +94,7 @@ func testPreloadDownloadPreventsMultipleDownload(t *testing.T) {
 	var group sync.WaitGroup
 	group.Add(2)
 	dlCall := func() {
-		if err := Preload(constants.DefaultKubernetesVersion, constants.DefaultContainerRuntime, "docker"); err != nil {
+		if err := Preload(constants.DefaultKubernetesVersion, constants.Docker, "docker"); err != nil {
 			t.Logf("Failed to download preload: %+v (may be ok)", err)
 		}
 		group.Done()

@@ -119,7 +119,7 @@ func testPreloadNotExists(t *testing.T) {
 	getChecksum = func(k8sVersion, containerRuntime string) ([]byte, error) { return []byte("check"), nil }
 	ensureChecksumValid = func(k8sVersion, containerRuntime, path string, checksum []byte) error { return nil }
 
-	err := Preload(constants.DefaultKubernetesVersion, constants.DefaultContainerRuntime, "docker")
+	err := Preload(constants.DefaultKubernetesVersion, constants.Docker, "docker")
 	if err != nil {
 		t.Errorf("Expected no error when preload exists")
 	}

@@ -140,7 +140,7 @@ func testPreloadChecksumMismatch(t *testing.T) {
 		return fmt.Errorf("checksum mismatch")
 	}
 
-	err := Preload(constants.DefaultKubernetesVersion, constants.DefaultContainerRuntime, "docker")
+	err := Preload(constants.DefaultKubernetesVersion, constants.Docker, "docker")
 	expectedErrMsg := "checksum mismatch"
 	if err == nil {
 		t.Errorf("Expected error when checksum mismatches")
@@ -30,7 +30,7 @@ minikube start [flags]
       --cache-images               If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none. (default true)
       --cert-expiration duration   Duration until minikube certificate expiration, defaults to three years (26280h). (default 26280h0m0s)
       --cni string                 CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)
-      --container-runtime string   The container runtime to be used (docker, cri-o, containerd). (default "docker")
+      --container-runtime string   The container runtime to be used. Valid options: docker, cri-o, containerd (default: auto)
       --cpus string                Number of CPUs allocated to Kubernetes. Use "max" to use the maximum number of CPUs. (default "2")
      --cri-socket string          The cri socket path to be used.
      --delete-on-failure          If set, delete the current cluster if start fails and try again. Defaults to false.
@@ -89,7 +89,7 @@ minikube start --extra-config=kubeadm.ignore-preflight-errors=SystemVerification
 
 ## Runtime configuration
 
-The default container runtime in minikube is Docker. You can select it explicitly by using:
+The default container runtime in minikube varies. You can select one explicitly by using:
 
 ```shell
 minikube start --container-runtime=docker

@@ -100,6 +100,8 @@
 * [containerd](https://github.com/containerd/containerd)
 * [cri-o](https://github.com/cri-o/cri-o)
 
+See <https://kubernetes.io/docs/setup/production-environment/container-runtimes/>
+
 ## Environment variables
 
 minikube supports passing environment variables instead of flags for every value listed in `minikube config`. This is done by passing an environment variable with the prefix `MINIKUBE_`.
@@ -647,6 +647,7 @@
 	"The path on the file system where the testing docs in markdown need to be saved": "",
 	"The podman service within '{{.cluster}}' is not active": "",
 	"The podman-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "",
+	"The podman-env command is only compatible with the \"crio\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "",
 	"The requested memory allocation of {{.requested}}MiB does not leave room for system overhead (total system memory: {{.system_limit}}MiB). You may face stability issues.": "",
 	"The service namespace": "",
 	"The service/ingress {{.resource}} requires privileged ports to be exposed: {{.ports}}": "",