Merge branch 'master' of github.com:kubernetes/minikube into kic-test

pull/10408/head
Sharif Elgamal 2021-02-17 14:47:42 -08:00
commit cb35af3fd3
18 changed files with 315 additions and 55 deletions

View File

@ -19,3 +19,5 @@ sha256 25dc558fbabc987bd58c7eab5230121b258a7b0eb34a49dc6595f1c6f3969116 v1.18.2.tar.gz
sha256 d5c6442e3990938badc966cdd1eb9ebe2fc11345452c233aa0d87ca38fbeed81 v1.18.3.tar.gz
sha256 74a4e916acddc6cf47ab5752bdebb6732ce2c028505ef57b7edc21d2da9039b6 v1.18.4.tar.gz
sha256 fc8a8e61375e3ce30563eeb0fd6534c4f48fc20300a72e6ff51cc99cb2703516 v1.19.0.tar.gz
sha256 6165c5b8212ea03be2a465403177318bfe25a54c3e8d66d720344643913a0223 v1.19.1.tar.gz
sha256 76fd7543bc92d4364a11060f43a5131893a76c6e6e9d6de3a6bb6292c110b631 v1.20.0.tar.gz

View File

@ -4,8 +4,8 @@
#
################################################################################
CRIO_BIN_VERSION = v1.19.0
CRIO_BIN_COMMIT = 99c925bebdd9e392f2d575e25f2e6a1082e6c232
CRIO_BIN_VERSION = v1.20.0
CRIO_BIN_COMMIT = d388528dbed26b93c5bc1c89623607a1e597aa57
CRIO_BIN_SITE = https://github.com/cri-o/cri-o/archive
CRIO_BIN_SOURCE = $(CRIO_BIN_VERSION).tar.gz
CRIO_BIN_DEPENDENCIES = host-go libgpgme

View File

@ -29,6 +29,7 @@ storage_driver = "overlay"
# List to pass options to the storage driver. Please refer to
# containers-storage.conf(5) to see all available storage options.
#storage_option = [
# "overlay.mountopt=nodev,metacopy=on",
#]
# The default log directory where all logs will go unless directly specified by
@ -92,11 +93,6 @@ grpc_max_recv_msg_size = 16777216
#default_ulimits = [
#]
# default_runtime is the _name_ of the OCI runtime to be used as the default.
# The name is matched against the runtimes map below. If this value is changed,
# the corresponding existing entry from the runtimes map below will be ignored.
default_runtime = "runc"
# If true, the runtime will not use pivot_root, but instead use MS_MOVE.
no_pivot = false
@ -131,6 +127,12 @@ selinux = false
# will be used. This option supports live configuration reload.
seccomp_profile = ""
# Changes the meaning of an empty seccomp profile. By default
# (and according to CRI spec), an empty profile means unconfined.
# This option tells CRI-O to treat an empty profile as the default profile,
# which might increase security.
seccomp_use_default_when_empty = false
# Used to change the name of the default AppArmor profile of CRI-O. The default
# profile name is "crio-default". This profile only takes effect if the user
# does not specify a profile via the Kubernetes Pod's metadata annotation. If
@ -141,6 +143,9 @@ apparmor_profile = "crio-default"
# Cgroup management implementation used for the runtime.
cgroup_manager = "systemd"
# Specify whether the image pull must be performed in a separate cgroup.
separate_pull_cgroup = ""
# List of default capabilities for containers. If it is empty or commented out,
# only the capabilities defined in the containers json file by the user/kube
# will be added.
@ -174,11 +179,6 @@ hooks_dir = [
"/usr/share/containers/oci/hooks.d",
]
# List of default mounts for each container. **Deprecated:** this option will
# be removed in future versions in favor of default_mounts_file.
default_mounts = [
]
# Path to the file specifying the default mounts for each container. The
# format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads
# its default mounts from the following two files:
@ -243,7 +243,8 @@ gid_mappings = ""
ctr_stop_timeout = 30
# manage_ns_lifecycle determines whether we pin and remove namespaces
# and manage their lifecycle
# and manage their lifecycle.
# This option is being deprecated, and will be unconditionally true in the future.
manage_ns_lifecycle = true
# drop_infra_ctr determines whether CRI-O drops the infra container
@ -259,6 +260,11 @@ namespaces_dir = "/var/run"
# pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle
pinns_path = "/usr/bin/pinns"
# default_runtime is the _name_ of the OCI runtime to be used as the default.
# The name is matched against the runtimes map below. If this value is changed,
# the corresponding existing entry from the runtimes map below will be ignored.
default_runtime = "runc"
# The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes.
# The runtime to use is picked based on the runtime_handler provided by the CRI.
# If no runtime_handler is provided, the runtime will be picked based on the level
@ -268,7 +274,8 @@ pinns_path = "/usr/bin/pinns"
# runtime_path = "/path/to/the/executable"
# runtime_type = "oci"
# runtime_root = "/path/to/the/root"
#
# privileged_without_host_devices = false
# allowed_annotations = []
# Where:
# - runtime-handler: name used to identify the runtime
# - runtime_path (optional, string): absolute path to the runtime executable in
@ -279,6 +286,14 @@ pinns_path = "/usr/bin/pinns"
# omitted, an "oci" runtime is assumed.
# - runtime_root (optional, string): root directory for storage of containers
# state.
# - privileged_without_host_devices (optional, bool): an option for restricting
# host devices from being passed to privileged containers.
# - allowed_annotations (optional, array of strings): an option for specifying
# a list of experimental annotations that this runtime handler is allowed to process.
# The currently recognized values are:
# "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod.
# "io.kubernetes.cri-o.Devices" for configuring devices for the pod.
# "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm.
[crio.runtime.runtimes.runc]
@ -287,6 +302,8 @@ runtime_type = "oci"
runtime_root = "/run/runc"
# crun is a fast and lightweight fully featured OCI runtime and C library for
# running containers
#[crio.runtime.runtimes.crun]
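
For illustration, a fully specified handler entry combining the keys documented above might look like the sketch below; the binary and root paths are assumptions, and the annotation list simply reuses one of the values this config recognizes:

[crio.runtime.runtimes.crun]
# assumed install location of the crun binary
runtime_path = "/usr/bin/crun"
runtime_type = "oci"
# assumed state directory for crun
runtime_root = "/run/crun"
privileged_without_host_devices = false
allowed_annotations = ["io.kubernetes.cri-o.ShmSize"]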

View File

@ -29,6 +29,7 @@
# List to pass options to the storage driver. Please refer to
# containers-storage.conf(5) to see all available storage options.
#storage_option = [
# "overlay.mountopt=nodev,metacopy=on",
#]
# The default log directory where all logs will go unless directly specified by
@ -92,11 +93,6 @@ grpc_max_recv_msg_size = 16777216
#default_ulimits = [
#]
# default_runtime is the _name_ of the OCI runtime to be used as the default.
# The name is matched against the runtimes map below. If this value is changed,
# the corresponding existing entry from the runtimes map below will be ignored.
default_runtime = "runc"
# If true, the runtime will not use pivot_root, but instead use MS_MOVE.
no_pivot = false
@ -131,6 +127,12 @@ selinux = false
# will be used. This option supports live configuration reload.
seccomp_profile = ""
# Changes the meaning of an empty seccomp profile. By default
# (and according to CRI spec), an empty profile means unconfined.
# This option tells CRI-O to treat an empty profile as the default profile,
# which might increase security.
seccomp_use_default_when_empty = false
# Used to change the name of the default AppArmor profile of CRI-O. The default
# profile name is "crio-default". This profile only takes effect if the user
# does not specify a profile via the Kubernetes Pod's metadata annotation. If
@ -141,6 +143,9 @@ apparmor_profile = "crio-default"
# Cgroup management implementation used for the runtime.
cgroup_manager = "systemd"
# Specify whether the image pull must be performed in a separate cgroup.
separate_pull_cgroup = ""
# List of default capabilities for containers. If it is empty or commented out,
# only the capabilities defined in the containers json file by the user/kube
# will be added.
@ -174,11 +179,6 @@ hooks_dir = [
"/usr/share/containers/oci/hooks.d",
]
# List of default mounts for each container. **Deprecated:** this option will
# be removed in future versions in favor of default_mounts_file.
default_mounts = [
]
# Path to the file specifying the default mounts for each container. The
# format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads
# its default mounts from the following two files:
@ -243,7 +243,8 @@ gid_mappings = ""
ctr_stop_timeout = 30
# manage_ns_lifecycle determines whether we pin and remove namespaces
# and manage their lifecycle
# and manage their lifecycle.
# This option is being deprecated, and will be unconditionally true in the future.
manage_ns_lifecycle = true
# drop_infra_ctr determines whether CRI-O drops the infra container
@ -259,6 +260,11 @@ namespaces_dir = "/var/run"
# pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle
pinns_path = ""
# default_runtime is the _name_ of the OCI runtime to be used as the default.
# The name is matched against the runtimes map below. If this value is changed,
# the corresponding existing entry from the runtimes map below will be ignored.
default_runtime = "runc"
# The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes.
# The runtime to use is picked based on the runtime_handler provided by the CRI.
# If no runtime_handler is provided, the runtime will be picked based on the level
@ -268,7 +274,8 @@ pinns_path = ""
# runtime_path = "/path/to/the/executable"
# runtime_type = "oci"
# runtime_root = "/path/to/the/root"
#
# privileged_without_host_devices = false
# allowed_annotations = []
# Where:
# - runtime-handler: name used to identify the runtime
# - runtime_path (optional, string): absolute path to the runtime executable in
@ -279,6 +286,14 @@ pinns_path = ""
# omitted, an "oci" runtime is assumed.
# - runtime_root (optional, string): root directory for storage of containers
# state.
# - privileged_without_host_devices (optional, bool): an option for restricting
# host devices from being passed to privileged containers.
# - allowed_annotations (optional, array of strings): an option for specifying
# a list of experimental annotations that this runtime handler is allowed to process.
# The currently recognized values are:
# "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod.
# "io.kubernetes.cri-o.Devices" for configuring devices for the pod.
# "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm.
[crio.runtime.runtimes.runc]
@ -287,6 +302,8 @@ runtime_type = "oci"
runtime_root = "/run/runc"
# crun is a fast and lightweight fully featured OCI runtime and C library for
# running containers
#[crio.runtime.runtimes.crun]

View File

@ -1,4 +1,4 @@
sha256 a16846fe076aaf2c9ea2e854c3baba9fb838d916be7fb4b5be332e6c92d907d4 v1.9.3.tar.gz
sha256 5ebaa6e0dbd7fd1863f70d2bc71dc8a94e195c3339c17e3cac4560c9ec5747f8 v2.1.1.tar.gz
sha256 ec5473e51fa28f29af323473fc484f742dc7df23d06d8ba9f217f13382893a71 v2.2.0.tar.gz
sha256 bd86b181251e2308cb52f18410fb52d89df7f130cecf0298bbf9a848fe7daf60 v2.2.1.tar.gz
sha256 3212bad60d945c1169b27da03959f36d92d1d8964645c701a5a82a89118e96d1 v2.2.1.tar.gz

View File

@ -689,19 +689,18 @@ func GenerateTemplateData(addon *Addon, cfg config.KubernetesConfig) interface{}
opts.Registries[name] = "" // Avoid nil access when rendering
}
// Send messages to stderr as some tests rely on stdout
if override, ok := opts.CustomRegistries[name]; ok {
out.ErrT(style.Option, "Using image {{.registry}}{{.image}}", out.V{
out.Step(style.Option, "Using image {{.registry}}{{.image}}", out.V{
"registry": override,
"image": image,
})
} else if opts.ImageRepository != "" {
out.ErrT(style.Option, "Using image {{.registry}}{{.image}} (global image repository)", out.V{
out.Step(style.Option, "Using image {{.registry}}{{.image}} (global image repository)", out.V{
"registry": opts.ImageRepository,
"image": image,
})
} else {
out.ErrT(style.Option, "Using image {{.registry}}{{.image}}", out.V{
out.Step(style.Option, "Using image {{.registry}}{{.image}}", out.V{
"registry": opts.Registries[name],
"image": image,
})

View File

@ -37,6 +37,8 @@ const (
NodeReadyKey = "node_ready"
// KubeletKey is the name used in the flags for waiting for the kubelet status to be ready
KubeletKey = "kubelet"
// ExtraKey is the name used for extra waiting for pods in CorePodsList to be Ready
ExtraKey = "extra"
)
// vars related to the --wait flag
@ -44,9 +46,9 @@ var (
// DefaultComponents is a map of the default components to wait for
DefaultComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true}
// NoComponents is a map of components to wait for if 'none' or 'false' is specified
NoComponents = map[string]bool{APIServerWaitKey: false, SystemPodsWaitKey: false, DefaultSAWaitKey: false, AppsRunningKey: false, NodeReadyKey: false, KubeletKey: false}
NoComponents = map[string]bool{APIServerWaitKey: false, SystemPodsWaitKey: false, DefaultSAWaitKey: false, AppsRunningKey: false, NodeReadyKey: false, KubeletKey: false, ExtraKey: false}
// AllComponents is map for waiting for all components.
AllComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true, DefaultSAWaitKey: true, AppsRunningKey: true, NodeReadyKey: true, KubeletKey: true}
AllComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true, DefaultSAWaitKey: true, AppsRunningKey: true, NodeReadyKey: true, KubeletKey: true, ExtraKey: true}
// DefaultWaitList is the list of all default components to wait for; only these names are to be used for start flags.
DefaultWaitList = []string{APIServerWaitKey, SystemPodsWaitKey}
// AllComponentsList is the list of all valid component keys to wait for; only these names are to be used for start flags.
@ -60,6 +62,15 @@ var (
"kube-proxy",
"kube-scheduler",
}
// CorePodsList is a list of pods essential to a running Kubernetes cluster; the extra wait ensures they are Ready
CorePodsList = []string{
"kube-dns", // coredns
"etcd",
"kube-apiserver",
"kube-controller-manager",
"kube-proxy",
"kube-scheduler",
}
)
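
As a minimal sketch of how these maps are consumed (it mirrors the bootstrapper change later in this commit; client, timeout, and the plumbing that fills cfg.VerifyComponents from the --wait flag are assumed to be in place):

// cfg.VerifyComponents is populated from the --wait flag,
// e.g. --wait=all selects kverify.AllComponents
if cfg.VerifyComponents[kverify.ExtraKey] {
	if err := kverify.WaitExtra(client, kverify.CorePodsList, timeout); err != nil {
		return errors.Wrap(err, "extra waiting")
	}
}
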
// ShouldWait returns true if the config indicates we need to wait

View File

@ -0,0 +1,133 @@
/*
Copyright 2021 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package kverify verifies a running Kubernetes cluster is healthy
package kverify
import (
"fmt"
"strings"
"time"
"github.com/pkg/errors"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
)
// WaitExtra calls waitForPodReadyByLabel for each pod in the labels list and returns any errors that occurred.
func WaitExtra(cs *kubernetes.Clientset, labels []string, timeout time.Duration) error {
klog.Infof("extra waiting for kube-system core pods %s to be Ready ...", labels)
start := time.Now()
defer func() {
klog.Infof("duration metric: took %s for extra waiting for kube-system core pods to be Ready ...", time.Since(start))
}()
var errs []string
for _, label := range labels {
if err := waitForPodReadyByLabel(cs, label, "kube-system", timeout); err != nil {
errs = append(errs, fmt.Sprintf("%q: %q", label, err.Error()))
}
}
if errs != nil {
return fmt.Errorf("%s", strings.Join(errs, ", "))
}
return nil
}
// waitForPodReadyByLabel waits for a pod with label ([key:]val) in a namespace to be in the Ready condition.
// If namespace is not provided, it defaults to "kube-system".
// If label key is not provided, it will try with "component" and "k8s-app".
func waitForPodReadyByLabel(cs *kubernetes.Clientset, label, namespace string, timeout time.Duration) error {
klog.Infof("waiting %v for pod with %q label in %q namespace to be Ready ...", timeout, label, namespace)
start := time.Now()
defer func() {
klog.Infof("duration metric: took %v to run WaitForPodReadyByLabel for pod with %q label in %q namespace ...", time.Since(start), label, namespace)
}()
if namespace == "" {
namespace = "kube-system"
}
lkey := ""
lval := ""
l := strings.Split(label, ":")
switch len(l) {
case 1: // treat as no label key provided, just val
lval = strings.TrimSpace(l[0])
case 2:
lkey = strings.TrimSpace(l[0])
lval = strings.TrimSpace(l[1])
default:
return fmt.Errorf("pod label %q is malformed", label)
}
lap := time.Now()
checkReady := func() (bool, error) {
if time.Since(start) > timeout {
return false, fmt.Errorf("wait for pod with %q label in %q namespace to be Ready timed out", label, namespace)
}
pods, err := cs.CoreV1().Pods(namespace).List(meta.ListOptions{})
if err != nil {
klog.Infof("error listing pods in %q namespace, will retry: %v", namespace, err)
return false, nil
}
for _, pod := range pods.Items {
for k, v := range pod.ObjectMeta.Labels {
if ((lkey == "" && (k == "component" || k == "k8s-app")) || lkey == k) && v == lval {
ready, reason := IsPodReady(&pod)
if ready {
klog.Info(reason)
return true, nil
}
// reduce log spam
if time.Since(lap) > (1 * time.Second) {
klog.Info(reason)
lap = time.Now()
}
return false, nil
}
}
}
klog.Infof("pod with %q label in %q namespace was not found, will retry", label, namespace)
return false, nil
}
if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, checkReady); err != nil {
return errors.Wrapf(err, "wait pod Ready")
}
return nil
}
// IsPodReady reports whether the pod is Ready, along with a verbose reason.
func IsPodReady(pod *core.Pod) (ready bool, reason string) {
if pod.Status.Phase != core.PodRunning {
return false, fmt.Sprintf("pod %q in %q namespace is not Running: %+v", pod.Name, pod.Namespace, pod.Status)
}
for _, c := range pod.Status.Conditions {
if c.Type == core.PodReady {
if c.Status != core.ConditionTrue {
return false, fmt.Sprintf("pod %q in %q namespace is not Ready: %+v", pod.Name, pod.Namespace, c)
}
return true, fmt.Sprintf("pod %q in %q namespace is Ready: %+v", pod.Name, pod.Namespace, c)
}
}
return false, fmt.Sprintf("pod %q in %q namespace does not have %q status: %+v", pod.Name, pod.Namespace, core.PodReady, pod.Status)
}
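
A hedged usage sketch: because waitForPodReadyByLabel splits each label on ':', both bare values and key:value pairs are accepted; cs is assumed to be an initialized *kubernetes.Clientset and the timeouts are arbitrary for the example:

// bare value: matched against the "component" and "k8s-app" label keys
if err := WaitExtra(cs, []string{"kube-dns", "etcd"}, 6*time.Minute); err != nil {
	klog.Errorf("core pods not Ready: %v", err)
}
// explicit key: matches only pods labeled k8s-app=kube-dns
if err := WaitExtra(cs, []string{"k8s-app:kube-dns"}, 6*time.Minute); err != nil {
	klog.Errorf("kube-dns pod not Ready: %v", err)
}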

View File

@ -36,6 +36,7 @@ import (
"github.com/docker/machine/libmachine"
"github.com/docker/machine/libmachine/state"
"github.com/pkg/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
@ -470,6 +471,12 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
return nil
}
if cfg.VerifyComponents[kverify.ExtraKey] {
if err := kverify.WaitExtra(client, kverify.CorePodsList, timeout); err != nil {
return errors.Wrap(err, "extra waiting")
}
}
cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c})
if err != nil {
return errors.Wrapf(err, "create runtme-manager %s", cfg.KubernetesConfig.ContainerRuntime)
@ -504,11 +511,11 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
}
}
}
if cfg.VerifyComponents[kverify.KubeletKey] {
if err := kverify.WaitForService(k.c, "kubelet", timeout); err != nil {
return errors.Wrap(err, "waiting for kubelet")
}
}
if cfg.VerifyComponents[kverify.NodeReadyKey] {
@ -658,6 +665,35 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error {
}
}
if cfg.VerifyComponents[kverify.ExtraKey] {
// after kubelet is restarted (with 'kubeadm init phase kubelet-start' above),
// it appears to be immediately Ready, as do all kube-system pods,
// but then (after ~10sec) it realises it has some changes to apply, which implies pod restarts as well,
// and by that time we would have exited completely, so we wait until kubelet begins restarting pods
klog.Info("waiting for restarted kubelet to initialise ...")
start := time.Now()
wait := func() error {
pods, err := client.CoreV1().Pods("kube-system").List(meta.ListOptions{})
if err != nil {
return err
}
for _, pod := range pods.Items {
if pod.Labels["tier"] == "control-plane" {
if ready, _ := kverify.IsPodReady(&pod); !ready {
return nil
}
}
}
return fmt.Errorf("kubelet not initialised")
}
_ = retry.Expo(wait, 250*time.Millisecond, 1*time.Minute)
klog.Infof("kubelet initialised")
klog.Infof("duration metric: took %s waiting for restarted kubelet to initialise ...", time.Since(start))
if err := kverify.WaitExtra(client, kverify.CorePodsList, kconst.DefaultControlPlaneTimeout); err != nil {
return errors.Wrap(err, "extra")
}
}
cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c})
if err != nil {
return errors.Wrap(err, "runtime")
@ -698,6 +734,7 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error {
if err := bsutil.AdjustResourceLimits(k.c); err != nil {
klog.Warningf("unable to adjust resource limits: %v", err)
}
return nil
}

View File

@ -151,9 +151,12 @@ func New(c Config) (Manager, error) {
switch c.Type {
case "", "docker":
return &Docker{
Socket: c.Socket,
Runner: c.Runner,
Init: sm,
Socket: c.Socket,
Runner: c.Runner,
ImageRepository: c.ImageRepository,
KubernetesVersion: c.KubernetesVersion,
Init: sm,
UseCRI: (c.Socket != ""), // !dockershim
}, nil
case "crio", "cri-o":
return &CRIO{

View File

@ -23,6 +23,7 @@ import (
"strings"
"time"
"github.com/blang/semver"
"github.com/pkg/errors"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/assets"
@ -56,9 +57,12 @@ func (e *ErrISOFeature) Error() string {
// Docker contains Docker runtime state
type Docker struct {
Socket string
Runner CommandRunner
Init sysinit.Manager
Socket string
Runner CommandRunner
ImageRepository string
KubernetesVersion semver.Version
Init sysinit.Manager
UseCRI bool
}
// Name is a human readable name for Docker
@ -181,6 +185,14 @@ func (r *Docker) CGroupDriver() (string, error) {
// KubeletOptions returns kubelet options for a runtime.
func (r *Docker) KubeletOptions() map[string]string {
if r.UseCRI {
return map[string]string{
"container-runtime": "remote",
"container-runtime-endpoint": r.SocketPath(),
"image-service-endpoint": r.SocketPath(),
"runtime-request-timeout": "15m",
}
}
return map[string]string{
"container-runtime": "docker",
}
@ -188,6 +200,9 @@ func (r *Docker) KubeletOptions() map[string]string {
// ListContainers returns a list of containers
func (r *Docker) ListContainers(o ListOptions) ([]string, error) {
if r.UseCRI {
return listCRIContainers(r.Runner, "", o)
}
args := []string{"ps"}
switch o.State {
case All:
@ -220,6 +235,9 @@ func (r *Docker) ListContainers(o ListOptions) ([]string, error) {
// KillContainers forcibly removes a running container based on ID
func (r *Docker) KillContainers(ids []string) error {
if r.UseCRI {
return killCRIContainers(r.Runner, ids)
}
if len(ids) == 0 {
return nil
}
@ -234,6 +252,9 @@ func (r *Docker) KillContainers(ids []string) error {
// StopContainers stops a running container based on ID
func (r *Docker) StopContainers(ids []string) error {
if r.UseCRI {
return stopCRIContainers(r.Runner, ids)
}
if len(ids) == 0 {
return nil
}
@ -248,6 +269,9 @@ func (r *Docker) StopContainers(ids []string) error {
// PauseContainers pauses a running container based on ID
func (r *Docker) PauseContainers(ids []string) error {
if r.UseCRI {
return pauseCRIContainers(r.Runner, "", ids)
}
if len(ids) == 0 {
return nil
}
@ -262,6 +286,9 @@ func (r *Docker) PauseContainers(ids []string) error {
// UnpauseContainers unpauses a container based on ID
func (r *Docker) UnpauseContainers(ids []string) error {
if r.UseCRI {
return unpauseCRIContainers(r.Runner, "", ids)
}
if len(ids) == 0 {
return nil
}
@ -276,6 +303,9 @@ func (r *Docker) UnpauseContainers(ids []string) error {
// ContainerLogCmd returns the command to retrieve the log for a container based on ID
func (r *Docker) ContainerLogCmd(id string, len int, follow bool) string {
if r.UseCRI {
return criContainerLogCmd(r.Runner, id, len, follow)
}
var cmd strings.Builder
cmd.WriteString("docker logs ")
if len > 0 {
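
A minimal sketch of the new switch, assuming an initialized CommandRunner named runner and a hypothetical socket path: a non-empty Socket in cruntime.Config sets UseCRI, so KubeletOptions points the kubelet at the CRI endpoint instead of dockershim:

mgr, err := cruntime.New(cruntime.Config{
	Type:   "docker",
	Socket: "/var/run/cri.sock", // hypothetical; an empty string keeps dockershim behaviour
	Runner: runner,
})
if err != nil {
	return err
}
// with UseCRI=true this returns container-runtime=remote plus the endpoint flags
opts := mgr.KubeletOptions()
klog.Infof("kubelet options: %v", opts)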

View File

@ -245,6 +245,7 @@ func Provision(cc *config.ClusterConfig, n *config.Node, apiServer bool, delOnFa
func configureRuntimes(runner cruntime.CommandRunner, cc config.ClusterConfig, kv semver.Version) cruntime.Manager {
co := cruntime.Config{
Type: cc.KubernetesConfig.ContainerRuntime,
Socket: cc.KubernetesConfig.CRISocket,
Runner: runner,
ImageRepository: cc.KubernetesConfig.ImageRepository,
KubernetesVersion: kv,

View File

@ -8,7 +8,9 @@ description: >
---
Community triage takes place **every Wednesday** from **11AM-12PM PST**.
Hangouts link: https://meet.google.com/ikf-fvrs-eer
- Hangouts link: https://meet.google.com/ikf-fvrs-eer
- Google Group: https://groups.google.com/forum/#!forum/minikube-dev
All community members are welcome and encouraged to join and help us triage minikube!

View File

@ -25,7 +25,7 @@ A NodePort service is the most basic way to get external traffic directly to your service.
We also have a shortcut for fetching the minikube IP and a service's `NodePort`:
```shell
minikube service --url $SERVICE
minikube service --url <service-name>
```
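
For example, with a hypothetical service named `hello-minikube`:

```shell
minikube service --url hello-minikube
```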
## Getting the NodePort using kubectl
@ -35,7 +35,7 @@ The minikube VM is exposed to the host system via a host-only IP address, which can be obtained
To determine the NodePort for your service, you can use a `kubectl` command like this (note that `nodePort` begins with lowercase `n` in JSON output):
```shell
kubectl get service $SERVICE --output='jsonpath="{.spec.ports[0].nodePort}"'
kubectl get service <service-name> --output='jsonpath="{.spec.ports[0].nodePort}"'
```
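
For example, for a hypothetical `hello-minikube` service this prints just the port number:

```shell
kubectl get service hello-minikube --output='jsonpath="{.spec.ports[0].nodePort}"'
```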
### Increasing the NodePort range

View File

@ -256,7 +256,6 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) {
if !strings.Contains(rr.Output(), expectedImgInside) {
t.Fatalf("expected 'docker images' to have %q inside minikube. but the output is: *%s*", expectedImgInside, rr.Output())
}
}
func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) {
@ -269,7 +268,7 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) {
// Use more memory so that we may reliably fit MySQL and nginx
// changing api server so later in soft start we verify it didn't change
startArgs := append([]string{"start", "-p", profile, "--memory=4000", fmt.Sprintf("--apiserver-port=%d", apiPortTest), "--wait=true"}, StartArgs()...)
startArgs := append([]string{"start", "-p", profile, "--memory=4000", fmt.Sprintf("--apiserver-port=%d", apiPortTest), "--wait=all"}, StartArgs()...)
c := exec.CommandContext(ctx, Target(), startArgs...)
env := os.Environ()
env = append(env, fmt.Sprintf("HTTP_PROXY=%s", srv.Addr))
@ -401,7 +400,6 @@ func validateMinikubeKubectlDirectCall(ctx context.Context, t *testing.T, profil
if err != nil {
t.Fatalf("failed to run kubectl directly. args %q: %v", rr.Command(), err)
}
}
func validateExtraConfig(ctx context.Context, t *testing.T, profile string) {
@ -409,7 +407,7 @@ func validateExtraConfig(ctx context.Context, t *testing.T, profile string) {
start := time.Now()
// The tests before this already created a profile, starting minikube with different --extra-config cmdline option.
startArgs := []string{"start", "-p", profile, "--extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision"}
startArgs := []string{"start", "-p", profile, "--extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision", "--wait=all"}
c := exec.CommandContext(ctx, Target(), startArgs...)
rr, err := Run(t, c)
if err != nil {
@ -427,7 +425,6 @@ func validateExtraConfig(ctx context.Context, t *testing.T, profile string) {
if !strings.Contains(afterCfg.Config.KubernetesConfig.ExtraOptions.String(), expectedExtraOptions) {
t.Errorf("expected ExtraOptions to contain %s but got %s", expectedExtraOptions, afterCfg.Config.KubernetesConfig.ExtraOptions.String())
}
}
// imageID returns a docker image id for image `image` and current architecture
@ -451,6 +448,7 @@ func imageID(image string) string {
}
// validateComponentHealth asserts that all Kubernetes components are healthy
// note: it expects all components to be Ready, so it makes sense to run it only right after tests that use the '--wait=all' start flag (i.e., with extra wait)
func validateComponentHealth(ctx context.Context, t *testing.T, profile string) {
defer PostMortemLogs(t, profile)
@ -474,12 +472,22 @@ func validateComponentHealth(ctx context.Context, t *testing.T, profile string)
for _, i := range cs.Items {
for _, l := range i.Labels {
t.Logf("%s phase: %s", l, i.Status.Phase)
_, ok := found[l]
if ok {
if _, ok := found[l]; ok { // skip irrelevant (e.g., repeating/redundant '"tier": "control-plane"') labels
found[l] = true
if i.Status.Phase != "Running" {
t.Logf("%s phase: %s", l, i.Status.Phase)
if i.Status.Phase != api.PodRunning {
t.Errorf("%s is not Running: %+v", l, i.Status)
continue
}
for _, c := range i.Status.Conditions {
if c.Type == api.PodReady {
if c.Status != api.ConditionTrue {
t.Errorf("%s is not Ready: %+v", l, i.Status)
} else {
t.Logf("%s status: %s", l, c.Type)
}
break
}
}
}
}

View File

@ -155,7 +155,7 @@ func TestErrorJSONOutput(t *testing.T) {
t.Fatalf("last cloud event is not of type error: %v", last)
}
last.validateData(t, "exitcode", fmt.Sprintf("%v", reason.ExDriverUnsupported))
last.validateData(t, "message", fmt.Sprintf("The driver 'fail' is not supported on %s", runtime.GOOS))
last.validateData(t, "message", fmt.Sprintf("The driver 'fail' is not supported on %s/%s", runtime.GOOS, runtime.GOARCH))
}
type cloudEvent struct {

View File

@ -95,7 +95,7 @@ func TestSkaffold(t *testing.T) {
}()
// make sure "skaffold run" exits without failure
cmd := exec.CommandContext(ctx, tf.Name(), "run", "--minikube-profile", profile, "--kube-context", profile, "--status-check=true", "--port-forward=false")
cmd := exec.CommandContext(ctx, tf.Name(), "run", "--minikube-profile", profile, "--kube-context", profile, "--status-check=true", "--port-forward=false", "--interactive=false")
cmd.Dir = "testdata/skaffold"
rr, err = Run(t, cmd)
if err != nil {

View File

@ -79,7 +79,7 @@ func TestStartStop(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
MaybeParallel(t)
profile := UniqueProfileName(tc.name)
ctx, cancel := context.WithTimeout(context.Background(), Minutes(40))
ctx, cancel := context.WithTimeout(context.Background(), Minutes(30))
defer Cleanup(t, profile, cancel)
type validateStartStopFunc func(context.Context, *testing.T, string, string, string, []string)
if !strings.Contains(tc.name, "docker") && NoneDriver() {