Merge pull request #24138 from kubernetes/dev-1.20

Official 1.20 Release Docs
pull/25503/head
Anna 2020-12-08 15:11:00 -06:00 committed by GitHub
commit 391eef6bda
97 changed files with 54332 additions and 1504 deletions


@ -138,10 +138,10 @@ time_format_default = "January 02, 2006 at 3:04 PM PST"
description = "Production-Grade Container Orchestration"
showedit = true
latest = "v1.19"
latest = "v1.20"
fullversion = "v1.19.0"
version = "v1.19"
fullversion = "v1.20.0"
version = "v1.20"
githubbranch = "master"
docsbranch = "master"
deprecated = false
@ -183,40 +183,40 @@ js = [
]
[[params.versions]]
fullversion = "v1.19.0"
version = "v1.19"
githubbranch = "v1.19.0"
fullversion = "v1.20.0"
version = "v1.20"
githubbranch = "v1.20.0"
docsbranch = "master"
url = "https://kubernetes.io"
[[params.versions]]
fullversion = "v1.18.8"
fullversion = "v1.19.4"
version = "v1.19"
githubbranch = "v1.19.4"
docsbranch = "release-1.19"
url = "https://v1-19.docs.kubernetes.io"
[[params.versions]]
fullversion = "v1.18.12"
version = "v1.18"
githubbranch = "v1.18.8"
githubbranch = "v1.18.12"
docsbranch = "release-1.18"
url = "https://v1-18.docs.kubernetes.io"
[[params.versions]]
fullversion = "v1.17.11"
fullversion = "v1.17.14"
version = "v1.17"
githubbranch = "v1.17.11"
githubbranch = "v1.17.14"
docsbranch = "release-1.17"
url = "https://v1-17.docs.kubernetes.io"
[[params.versions]]
fullversion = "v1.16.14"
fullversion = "v1.16.15"
version = "v1.16"
githubbranch = "v1.16.14"
githubbranch = "v1.16.15"
docsbranch = "release-1.16"
url = "https://v1-16.docs.kubernetes.io"
[[params.versions]]
fullversion = "v1.15.12"
version = "v1.15"
githubbranch = "v1.15.12"
docsbranch = "release-1.15"
url = "https://v1-15.docs.kubernetes.io"
# User interface configuration
[params.ui]


@ -330,6 +330,26 @@ the kubelet can use topology hints when making resource assignment decisions.
See [Control Topology Management Policies on a Node](/docs/tasks/administer-cluster/topology-manager/)
for more information.
## Graceful Node Shutdown {#graceful-node-shutdown}
{{< feature-state state="alpha" for_k8s_version="v1.20" >}}
If you have enabled the `GracefulNodeShutdown` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/), then the kubelet attempts to detect the node system shutdown and terminates pods running on the node.
The kubelet ensures that pods follow the normal [pod termination process](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination) during the node shutdown.
When the `GracefulNodeShutdown` feature gate is enabled, the kubelet uses [systemd inhibitor locks](https://www.freedesktop.org/wiki/Software/systemd/inhibit/) to delay the node shutdown by a given duration. During a shutdown, the kubelet terminates pods in two phases:
1. Terminate regular pods running on the node.
2. Terminate [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical) running on the node.
The Graceful Node Shutdown feature is configured with two [`KubeletConfiguration`](/docs/tasks/administer-cluster/kubelet-config-file/) options:
* `ShutdownGracePeriod`:
* Specifies the total duration that the node should delay the shutdown by. This is the total grace period for pod termination for both regular and [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical).
* `ShutdownGracePeriodCriticalPods`:
* Specifies the duration used to terminate [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical) during a node shutdown. This should be less than `ShutdownGracePeriod`.
For example, if `ShutdownGracePeriod=30s`, and `ShutdownGracePeriodCriticalPods=10s`, kubelet will delay the node shutdown by 30 seconds. During the shutdown, the first 20 (30-10) seconds would be reserved for gracefully terminating normal pods, and the last 10 seconds would be reserved for terminating [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical).
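For example, the matching kubelet configuration file settings would look like this minimal sketch (note the lower-case initial letters used in the configuration file; the durations are illustrative):
```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Total delay requested from systemd for the node shutdown (regular + critical pods).
shutdownGracePeriod: "30s"
# Portion of shutdownGracePeriod reserved for critical pods.
shutdownGracePeriodCriticalPods: "10s"
```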
## {{% heading "whatsnext" %}}


@ -6,7 +6,7 @@ min-kubernetes-server-version: v1.18
<!-- overview -->
{{< feature-state state="alpha" for_k8s_version="v1.18" >}}
{{< feature-state state="beta" for_k8s_version="v1.20" >}}
Controlling the behavior of the Kubernetes API server in an overload situation
is a key task for cluster administrators. The {{< glossary_tooltip
@ -37,25 +37,30 @@ Fairness feature enabled.
<!-- body -->
## Enabling API Priority and Fairness
## Enabling/Disabling API Priority and Fairness
The API Priority and Fairness feature is controlled by a feature gate
and is not enabled by default. See
and is enabled by default. See
[Feature Gates](/docs/reference/command-line-tools-reference/feature-gates/)
for a general explanation of feature gates and how to enable and disable them. The
name of the feature gate for APF is "APIPriorityAndFairness". This
feature also involves an {{< glossary_tooltip term_id="api-group"
text="API Group" >}} that must be enabled. You can do these
things by adding the following command-line flags to your
`kube-apiserver` invocation:
for a general explanation of feature gates and how to enable and
disable them. The name of the feature gate for APF is
"APIPriorityAndFairness". This feature also involves an {{<
glossary_tooltip term_id="api-group" text="API Group" >}} with: (a) a
`v1alpha1` version, disabled by default, and (b) a `v1beta1`
version, enabled by default. You can disable the feature
gate and API group v1beta1 version by adding the following
command-line flags to your `kube-apiserver` invocation:
```shell
kube-apiserver \
--feature-gates=APIPriorityAndFairness=true \
--runtime-config=flowcontrol.apiserver.k8s.io/v1alpha1=true \
--feature-gates=APIPriorityAndFairness=false \
--runtime-config=flowcontrol.apiserver.k8s.io/v1beta1=false \
# …and other flags as usual
```
Alternatively, you can enable the v1alpha1 version of the API group
with `--runtime-config=flowcontrol.apiserver.k8s.io/v1alpha1=true`.
The command-line flag `--enable-priority-and-fairness=false` will disable the
API Priority and Fairness feature, even if other flags have enabled it.
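If you are unsure whether APF is active in your cluster, a quick check is to list the flow control objects that the API server maintains (a sketch; it assumes you have `kubectl` access with permission to read these cluster-scoped resources):
```shell
# Both commands return built-in objects when the flowcontrol API group is being served.
kubectl get prioritylevelconfigurations.flowcontrol.apiserver.k8s.io
kubectl get flowschemas.flowcontrol.apiserver.k8s.io
```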
@ -189,12 +194,14 @@ that originate from outside your cluster.
## Resources
The flow control API involves two kinds of resources.
[PriorityLevelConfigurations](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#prioritylevelconfiguration-v1alpha1-flowcontrol-apiserver-k8s-io)
[PriorityLevelConfigurations](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io)
define the available isolation classes, the share of the available concurrency
budget that each can handle, and allow for fine-tuning queuing behavior.
[FlowSchemas](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#flowschema-v1alpha1-flowcontrol-apiserver-k8s-io)
are used to classify individual inbound requests, matching each to a single
PriorityLevelConfiguration.
[FlowSchemas](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#flowschema-v1beta1-flowcontrol-apiserver-k8s-io)
are used to classify individual inbound requests, matching each to a
single PriorityLevelConfiguration. There is also a `v1alpha1` version
of the same API group, and it has the same Kinds with the same syntax and
semantics.
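As a quick illustration before the detailed field descriptions below, a minimal custom PriorityLevelConfiguration might look like this sketch (the name and numbers are hypothetical, not one of the built-in objects):
```yaml
apiVersion: flowcontrol.apiserver.k8s.io/v1beta1
kind: PriorityLevelConfiguration
metadata:
  name: example-priority-level   # hypothetical name
spec:
  type: Limited
  limited:
    assuredConcurrencyShares: 10   # share of the server's overall concurrency budget
    limitResponse:
      type: Queue
      queuing:
        queues: 64
        queueLengthLimit: 50
        handSize: 6
```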
### PriorityLevelConfiguration
A PriorityLevelConfiguration represents a single isolation class. Each
@ -331,6 +338,13 @@ PriorityLevelConfigurations.
### Metrics
{{< note >}}
In versions of Kubernetes before v1.20, the labels `flow_schema` and
`priority_level` were inconsistently named `flowSchema` and `priorityLevel`,
respectively. If you're running Kubernetes versions v1.19 and earlier, you
should refer to the documentation for your version.
{{< /note >}}
When you enable the API Priority and Fairness feature, the kube-apiserver
exports additional metrics. Monitoring these can help you determine whether your
configuration is inappropriately throttling important traffic, or find
@ -338,8 +352,8 @@ poorly-behaved workloads that may be harming system health.
* `apiserver_flowcontrol_rejected_requests_total` is a counter vector
(cumulative since server start) of requests that were rejected,
broken down by the labels `flowSchema` (indicating the one that
matched the request), `priorityLevel` (indicating the one to which
broken down by the labels `flow_schema` (indicating the one that
matched the request), `priority_level` (indicating the one to which
the request was assigned), and `reason`. The `reason` label will
have one of the following values:
* `queue-full`, indicating that too many requests were already
@ -352,8 +366,8 @@ poorly-behaved workloads that may be harming system health.
* `apiserver_flowcontrol_dispatched_requests_total` is a counter
vector (cumulative since server start) of requests that began
executing, broken down by the labels `flowSchema` (indicating the
one that matched the request) and `priorityLevel` (indicating the
executing, broken down by the labels `flow_schema` (indicating the
one that matched the request) and `priority_level` (indicating the
one to which the request was assigned).
* `apiserver_current_inqueue_requests` is a gauge vector of recent
@ -384,17 +398,17 @@ poorly-behaved workloads that may be harming system health.
* `apiserver_flowcontrol_current_inqueue_requests` is a gauge vector
holding the instantaneous number of queued (not executing) requests,
broken down by the labels `priorityLevel` and `flowSchema`.
broken down by the labels `priority_level` and `flow_schema`.
* `apiserver_flowcontrol_current_executing_requests` is a gauge vector
holding the instantaneous number of executing (not waiting in a
queue) requests, broken down by the labels `priorityLevel` and
`flowSchema`.
queue) requests, broken down by the labels `priority_level` and
`flow_schema`.
* `apiserver_flowcontrol_priority_level_request_count_samples` is a
histogram vector of observations of the then-current number of
requests broken down by the labels `phase` (which takes on the
values `waiting` and `executing`) and `priorityLevel`. Each
values `waiting` and `executing`) and `priority_level`. Each
histogram gets observations taken periodically, up through the last
activity of the relevant sort. The observations are made at a high
rate.
@ -402,7 +416,7 @@ poorly-behaved workloads that may be harming system health.
* `apiserver_flowcontrol_priority_level_request_count_watermarks` is a
histogram vector of high or low water marks of the number of
requests broken down by the labels `phase` (which takes on the
values `waiting` and `executing`) and `priorityLevel`; the label
values `waiting` and `executing`) and `priority_level`; the label
`mark` takes on values `high` and `low`. The water marks are
accumulated over windows bounded by the times when an observation
was added to
@ -411,7 +425,7 @@ poorly-behaved workloads that may be harming system health.
* `apiserver_flowcontrol_request_queue_length_after_enqueue` is a
histogram vector of queue lengths for the queues, broken down by
the labels `priorityLevel` and `flowSchema`, as sampled by the
the labels `priority_level` and `flow_schema`, as sampled by the
enqueued requests. Each request that gets queued contributes one
sample to its histogram, reporting the length of the queue just
after the request was added. Note that this produces different
@ -428,12 +442,12 @@ poorly-behaved workloads that may be harming system health.
* `apiserver_flowcontrol_request_concurrency_limit` is a gauge vector
holding the computed concurrency limit (based on the API server's
total concurrency limit and PriorityLevelConfigurations' concurrency
shares), broken down by the label `priorityLevel`.
shares), broken down by the label `priority_level`.
* `apiserver_flowcontrol_request_wait_duration_seconds` is a histogram
vector of how long requests spent queued, broken down by the labels
`flowSchema` (indicating which one matched the request),
`priorityLevel` (indicating the one to which the request was
`flow_schema` (indicating which one matched the request),
`priority_level` (indicating the one to which the request was
assigned), and `execute` (indicating whether the request started
executing).
{{< note >}}
@ -445,8 +459,8 @@ poorly-behaved workloads that may be harming system health.
* `apiserver_flowcontrol_request_execution_seconds` is a histogram
vector of how long requests took to actually execute, broken down by
the labels `flowSchema` (indicating which one matched the request)
and `priorityLevel` (indicating the one to which the request was
the labels `flow_schema` (indicating which one matched the request)
and `priority_level` (indicating the one to which the request was
assigned).
### Debug endpoints
@ -515,4 +529,3 @@ For background information on design details for API priority and fairness, see
the [enhancement proposal](https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/20190228-priority-and-fairness.md).
You can make suggestions and feature requests via [SIG API
Machinery](https://github.com/kubernetes/community/tree/master/sig-api-machinery).


@ -91,6 +91,27 @@ List of components currently supporting JSON format:
* {{< glossary_tooltip term_id="kube-scheduler" text="kube-scheduler" >}}
* {{< glossary_tooltip term_id="kubelet" text="kubelet" >}}
### Log sanitization
{{< feature-state for_k8s_version="v1.20" state="alpha" >}}
{{< warning >}}
Log sanitization might incur significant computation overhead and therefore should not be enabled in production.
{{< /warning >}}
The `--experimental-logging-sanitization` flag enables the klog sanitization filter.
If enabled, all log arguments are inspected for fields tagged as sensitive data (for example, passwords, keys, and tokens), and logging of these fields is prevented.
List of components currently supporting log sanitization:
* kube-controller-manager
* kube-apiserver
* kube-scheduler
* kubelet
{{< note >}}
The log sanitization filter does not prevent user workload logs from leaking sensitive data.
{{< /note >}}
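As a sketch, enabling the filter on a component amounts to adding the flag to its invocation (shown here for the kube-apiserver; the same flag applies to the other components listed above):
```shell
kube-apiserver \
 --experimental-logging-sanitization \
 # …and other flags as usual
```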
### Log verbosity level
The `-v` flag controls log verbosity. Increasing the value increases the number of logged events. Decreasing the value decreases the number of logged events.


@ -129,6 +129,28 @@ cloudprovider_gce_api_request_duration_seconds { request = "detach_disk"}
cloudprovider_gce_api_request_duration_seconds { request = "list_disk"}
```
### kube-scheduler metrics
{{< feature-state for_k8s_version="v1.20" state="alpha" >}}
The scheduler exposes optional metrics that report the requested resources and the desired limits of all running pods. These metrics can be used to build capacity planning dashboards, assess current or historical scheduling limits, quickly identify workloads that cannot be scheduled due to a lack of resources, and compare actual usage to the pod's request.
The kube-scheduler identifies the resource [requests and limits](/docs/concepts/configuration/manage-resources-containers/) configured for each Pod; when either a request or limit is non-zero, the kube-scheduler reports a metric time series. The time series is labelled by:
- namespace
- pod name
- the node where the pod is scheduled or an empty string if not yet scheduled
- priority
- the assigned scheduler for that pod
- the name of the resource (for example, `cpu`)
- the unit of the resource if known (for example, `cores`)
Once a pod reaches completion (has a `restartPolicy` of `Never` or `OnFailure` and is in the `Succeeded` or `Failed` pod phase, or has been deleted and all containers have a terminated state) the series is no longer reported since the scheduler is now free to schedule other pods to run. The two metrics are called `kube_pod_resource_request` and `kube_pod_resource_limit`.
The metrics are exposed at the HTTP endpoint `/metrics/resources` and require the same authorization as the `/metrics`
endpoint on the scheduler. You must use the `--show-hidden-metrics-for-version=1.20` flag to expose these alpha stability metrics.
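For example, you can scrape the endpoint directly (a sketch: `10259` is the kube-scheduler's default secure port, and `$TOKEN` is assumed to be a bearer token that is authorized to read the scheduler's metrics):
```shell
curl -sk -H "Authorization: Bearer $TOKEN" https://127.0.0.1:10259/metrics/resources
```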
## {{% heading "whatsnext" %}}
* Read about the [Prometheus text format](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format) for metrics


@ -600,6 +600,10 @@ spec:
example.com/foo: 1
```
## PID limiting
Process ID (PID) limits allow you to configure a kubelet to limit the number of PIDs that a given Pod can consume. See [PID Limiting](/docs/concepts/policy/pid-limiting/) for more information.
## Troubleshooting
### My Pods are pending with event message failedScheduling


@ -9,7 +9,7 @@ weight: 20
<!-- overview -->
{{< feature-state for_k8s_version="v1.14" state="beta" >}}
{{< feature-state for_k8s_version="v1.20" state="stable" >}}
This page describes the RuntimeClass resource and runtime selection mechanism.
@ -66,7 +66,7 @@ The RuntimeClass resource currently only has 2 significant fields: the RuntimeCl
(`metadata.name`) and the handler (`handler`). The object definition looks like this:
```yaml
apiVersion: node.k8s.io/v1beta1 # RuntimeClass is defined in the node.k8s.io API group
apiVersion: node.k8s.io/v1 # RuntimeClass is defined in the node.k8s.io API group
kind: RuntimeClass
metadata:
name: myclass # The name the RuntimeClass will be referenced by
@ -186,4 +186,3 @@ are accounted for in Kubernetes.
- Read about the [Pod Overhead](/docs/concepts/scheduling-eviction/pod-overhead/) concept
- [PodOverhead Feature Design](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/20190226-pod-overhead.md)


@ -204,7 +204,8 @@ DaemonSet, `/var/lib/kubelet/pod-resources` must be mounted as a
{{< glossary_tooltip term_id="volume" >}} in the plugin's
[PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core).
Support for the "PodResources service" requires `KubeletPodResources` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) to be enabled. It is enabled by default starting with Kubernetes 1.15.
Support for the "PodResources service" requires `KubeletPodResources` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) to be enabled.
It is enabled by default starting with Kubernetes 1.15 and is v1 since Kubernetes 1.20.
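A sketch of the relevant fragment of a device plugin DaemonSet manifest is shown below; the plugin name and image are hypothetical, and only the volume wiring for `/var/lib/kubelet/pod-resources` matters here:
```yaml
# Fragment of a device plugin DaemonSet manifest (names and image are hypothetical)
spec:
  template:
    spec:
      containers:
      - name: my-device-plugin
        image: example.com/my-device-plugin:1.0
        volumeMounts:
        - name: pod-resources
          mountPath: /var/lib/kubelet/pod-resources
      volumes:
      - name: pod-resources
        hostPath:
          path: /var/lib/kubelet/pod-resources
```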
## Device Plugin integration with the Topology Manager


@ -0,0 +1,99 @@
---
reviewers:
- derekwaynecarr
title: Process ID Limits And Reservations
content_type: concept
weight: 40
---
<!-- overview -->
{{< feature-state for_k8s_version="v1.20" state="stable" >}}
Kubernetes allows you to limit the number of process IDs (PIDs) that a {{< glossary_tooltip term_id="Pod" text="Pod" >}} can use.
You can also reserve a number of allocatable PIDs for each {{< glossary_tooltip term_id="node" text="node" >}}
for use by the operating system and daemons (rather than by Pods).
<!-- body -->
Process IDs (PIDs) are a fundamental resource on nodes. It is trivial to hit the
task limit without hitting any other resource limits, which can then cause
instability to a host machine.
Cluster administrators require mechanisms to ensure that Pods running in the
cluster cannot induce PID exhaustion that prevents host daemons (such as the
{{< glossary_tooltip text="kubelet" term_id="kubelet" >}} or
{{< glossary_tooltip text="kube-proxy" term_id="kube-proxy" >}},
and potentially also the container runtime) from running.
In addition, it is important to ensure that PIDs are limited among Pods in order
to ensure they have limited impact on other workloads on the same node.
{{< note >}}
On certain Linux installations, the operating system sets the PIDs limit to a low default,
such as `32768`. Consider raising the value of `/proc/sys/kernel/pid_max`.
{{< /note >}}
You can configure a kubelet to limit the number of PIDs a given pod can consume.
For example, if your node's host OS is set to use a maximum of `262144` PIDs and
you expect to host fewer than `250` Pods, you can give each Pod a budget of `1000`
PIDs to prevent using up that node's overall number of available PIDs. If an
admin wants to overcommit PIDs, similar to CPU or memory, they may do so, with
some additional risk. Either way, a single Pod will not be able to bring
the whole machine down. This kind of resource limiting helps to prevent simple
fork bombs from affecting operation of an entire cluster.
Per-pod PID limiting allows administrators to protect one pod from another, but
does not ensure that all Pods scheduled onto that host are unable to impact the node overall.
Per-Pod limiting also does not protect the node agents themselves from PID exhaustion.
You can also reserve an amount of PIDs for node overhead, separate from the
allocation to Pods. This is similar to how you can reserve CPU, memory, or other
resources for use by the operating system and other facilities outside of Pods
and their containers.
PID limiting is an important sibling to [compute
resource](/docs/concepts/configuration/manage-resources-containers/) requests
and limits. However, you specify it in a different way: rather than defining a
Pod's resource limit in the `.spec` for a Pod, you configure the limit as a
setting on the kubelet. Pod-defined PID limits are not currently supported.
{{< caution >}}
This means that the limit that applies to a Pod may be different depending on
where the Pod is scheduled. To make things simple, it's easiest if all Nodes use
the same PID resource limits and reservations.
{{< /caution >}}
## Node PID limits
Kubernetes allows you to reserve a number of process IDs for system use. To
configure the reservation, use the parameter `pid=<number>` in the
`--system-reserved` and `--kube-reserved` command line options to the kubelet.
The values you specify declare that the stated numbers of process IDs are
reserved for the system as a whole and for Kubernetes system daemons,
respectively.
{{< note >}}
Before Kubernetes version 1.20, PID resource limiting with Node-level
reservations required enabling the [feature
gate](/docs/reference/command-line-tools-reference/feature-gates/)
`SupportNodePidsLimit` to work.
{{< /note >}}
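For example, a kubelet invocation that reserves PIDs for the operating system and for Kubernetes daemons might include flags like these (the numbers are illustrative):
```shell
kubelet \
 --system-reserved=pid=1000 \
 --kube-reserved=pid=500 \
 # …and other flags as usual
```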
## Pod PID limits
Kubernetes allows you to limit the number of processes running in a Pod. You
specify this limit at the node level, rather than configuring it as a resource
limit for a particular Pod. Each Node can have a different PID limit.
To configure the limit, you can specify the command line parameter `--pod-max-pids` to the kubelet, or set `PodPidsLimit` in the kubelet [configuration file](/docs/tasks/administer-cluster/kubelet-config-file/).
{{< note >}}
Before Kubernetes version 1.20, PID resource limiting for Pods required enabling
the [feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
`SupportPodPidsLimit` to work.
{{< /note >}}
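For example, the equivalent kubelet configuration file setting is a single field (a minimal sketch; `1024` is an arbitrary illustrative value):
```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
podPidsLimit: 1024
```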
## {{% heading "whatsnext" %}}
- Refer to the [PID Limiting enhancement document](https://github.com/kubernetes/enhancements/blob/097b4d8276bc9564e56adf72505d43ce9bc5e9e8/keps/sig-node/20190129-pid-limiting.md) for more information.
- For historical context, read [Process ID Limiting for Stability Improvements in Kubernetes 1.14](/blog/2019/04/15/process-id-limiting-for-stability-improvements-in-kubernetes-1.14/).
- Read [Managing Resources for Containers](/docs/concepts/configuration/manage-resources-containers/).


@ -4,7 +4,7 @@ reviewers:
- tallclair
title: Pod Security Policies
content_type: concept
weight: 20
weight: 30
---
<!-- overview -->


@ -3,7 +3,7 @@ reviewers:
- derekwaynecarr
title: Resource Quotas
content_type: concept
weight: 10
weight: 20
---
<!-- overview -->


@ -158,6 +158,49 @@ If you remove or change the label of the node where the pod is scheduled, the po
The `weight` field in `preferredDuringSchedulingIgnoredDuringExecution` is in the range 1-100. For each node that meets all of the scheduling requirements (resource request, RequiredDuringScheduling affinity expressions, etc.), the scheduler will compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding MatchExpressions. This score is then combined with the scores of other priority functions for the node. The node(s) with the highest total score are the most preferred.
#### Node affinity per scheduling profile
{{< feature-state for_k8s_version="v1.20" state="beta" >}}
When configuring multiple [scheduling profiles](/docs/reference/scheduling/config/#multiple-profiles), you can associate
a profile with a Node affinity, which is useful if a profile only applies to a specific set of Nodes.
To do so, add an `addedAffinity` to the args of the [`NodeAffinity` plugin](/docs/reference/scheduling/config/#scheduling-plugins)
in the [scheduler configuration](/docs/reference/scheduling/config/). For example:
```yaml
apiVersion: kubescheduler.config.k8s.io/v1beta1
kind: KubeSchedulerConfiguration
profiles:
- schedulerName: default-scheduler
- schedulerName: foo-scheduler
pluginConfig:
- name: NodeAffinity
args:
addedAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: scheduler-profile
operator: In
values:
- foo
```
The `addedAffinity` is applied to all Pods that set `.spec.schedulerName` to `foo-scheduler`, in addition to the
NodeAffinity specified in the PodSpec.
That is, in order to match the Pod, Nodes need to satisfy `addedAffinity` and the Pod's `.spec.NodeAffinity`.
Since the `addedAffinity` is not visible to end users, its behavior might be unexpected to them. We
recommend using node labels that have a clear correlation with the profile's scheduler name.
{{< note >}}
The DaemonSet controller, which [creates Pods for DaemonSets](/docs/concepts/workloads/controllers/daemonset/#scheduled-by-default-scheduler)
is not aware of scheduling profiles. For this reason, it is recommended that you keep a scheduler profile, such as the
`default-scheduler`, without any `addedAffinity`. Then, the DaemonSet's Pod template should use this scheduler name.
Otherwise, some Pods created by the DaemonSet controller might remain unschedulable.
{{< /note >}}
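A Pod that opts into the `foo-scheduler` profile shown above only needs to set its scheduler name; the added affinity is applied by the scheduler, so the Pod manifest stays unchanged otherwise (a sketch with a hypothetical name and image):
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx-on-foo   # hypothetical
spec:
  schedulerName: foo-scheduler   # picks the profile that carries the addedAffinity
  containers:
  - name: nginx
    image: nginx:1.19
```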
### Inter-pod affinity and anti-affinity
Inter-pod affinity and anti-affinity allow you to constrain which nodes your pod is eligible to be scheduled *based on


@ -48,7 +48,7 @@ that uses around 120MiB per Pod for the virtual machine and the guest OS:
```yaml
---
kind: RuntimeClass
apiVersion: node.k8s.io/v1beta1
apiVersion: node.k8s.io/v1
metadata:
name: kata-fc
handler: kata-fc


@ -168,11 +168,7 @@ record unless `publishNotReadyAddresses=True` is set on the Service.
### Pod's setHostnameAsFQDN field {#pod-sethostnameasfqdn-field}
{{< feature-state for_k8s_version="v1.19" state="alpha" >}}
**Prerequisites**: The `SetHostnameAsFQDN` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
must be enabled for the
{{< glossary_tooltip text="API Server" term_id="kube-apiserver" >}}
{{< feature-state for_k8s_version="v1.20" state="beta" >}}
When a Pod is configured to have a fully qualified domain name (FQDN), its hostname is the short hostname. For example, if you have a Pod with the fully qualified domain name `busybox-1.default-subdomain.my-namespace.svc.cluster-domain.example`, then by default the `hostname` command inside that Pod returns `busybox-1` and the `hostname --fqdn` command returns the FQDN.
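To opt into the FQDN behavior instead, set the Pod's `setHostnameAsFQDN` field to `true`. The sketch below assumes a headless Service named `default-subdomain` already exists in the Pod's namespace:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: busybox-1
spec:
  hostname: busybox-1
  subdomain: default-subdomain   # assumes a matching headless Service exists
  setHostnameAsFQDN: true        # the kernel hostname becomes the FQDN
  containers:
  - name: busybox
    image: busybox:1.28
    command: ["sleep", "3600"]
```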


@ -3,6 +3,7 @@ reviewers:
- lachie83
- khenidak
- aramase
- bridgetkromhout
title: IPv4/IPv6 dual-stack
feature:
title: IPv4/IPv6 dual-stack
@ -30,14 +31,17 @@ If you enable IPv4/IPv6 dual-stack networking for your Kubernetes cluster, the c
Enabling IPv4/IPv6 dual-stack on your Kubernetes cluster provides the following features:
* Dual-stack Pod networking (a single IPv4 and IPv6 address assignment per Pod)
* IPv4 and IPv6 enabled Services (each Service must be for a single address family)
* IPv4 and IPv6 enabled Services
* Pod off-cluster egress routing (e.g. the Internet) via both IPv4 and IPv6 interfaces
## Prerequisites
The following prerequisites are needed in order to utilize IPv4/IPv6 dual-stack Kubernetes clusters:
* Kubernetes 1.16 or later
* Kubernetes 1.20 or later
For information about using dual-stack services with earlier
Kubernetes versions, refer to the documentation for that version
of Kubernetes.
* Provider support for dual-stack networking (Cloud provider or otherwise must be able to provide Kubernetes nodes with routable IPv4/IPv6 network interfaces)
* A network plugin that supports dual-stack (such as Kubenet or Calico)
@ -68,47 +72,173 @@ An example of an IPv6 CIDR: `fdXY:IJKL:MNOP:15::/64` (this shows the format but
## Services
If your cluster has IPv4/IPv6 dual-stack networking enabled, you can create {{< glossary_tooltip text="Services" term_id="service" >}} with either an IPv4 or an IPv6 address. You can choose the address family for the Service's cluster IP by setting a field, `.spec.ipFamily`, on that Service.
You can only set this field when creating a new Service. Setting the `.spec.ipFamily` field is optional and should only be used if you plan to enable IPv4 and IPv6 {{< glossary_tooltip text="Services" term_id="service" >}} and {{< glossary_tooltip text="Ingresses" term_id="ingress" >}} on your cluster. The configuration of this field is not a requirement for [egress](#egress-traffic) traffic.
If your cluster has dual-stack enabled, you can create {{< glossary_tooltip text="Services" term_id="service" >}} which can use IPv4, IPv6, or both.
The address family of a Service defaults to the address family of the first service cluster IP range (configured via the `--service-cluster-ip-range` flag to the kube-controller-manager).
When you define a Service you can optionally configure it as dual stack. To specify the behavior you want, you
set the `.spec.ipFamilyPolicy` field to one of the following values:
* `SingleStack`: Single-stack service. The control plane allocates a cluster IP for the Service, using the first configured service cluster IP range.
* `PreferDualStack`:
* Only used if the cluster has dual-stack enabled. Allocates IPv4 and IPv6 cluster IPs for the Service
* If the cluster does not have dual-stack enabled, this setting follows the same behavior as `SingleStack`.
* `RequireDualStack`: Allocates Service `.spec.ClusterIPs` from both IPv4 and IPv6 address ranges.
* Selects the `.spec.ClusterIP` from the list of `.spec.ClusterIPs` based on the address family of the first element in the `.spec.ipFamilies` array.
* The cluster must have dual-stack networking configured.
If you would like to define which IP family to use for single stack or define the order of IP families for dual-stack, you can choose the address families by setting an optional field, `.spec.ipFamilies`, on the Service.
{{< note >}}
The default address family for your cluster is the address family of the first service cluster IP range configured via the `--service-cluster-ip-range` flag to the kube-controller-manager.
The `.spec.ipFamilies` field is immutable because the `.spec.ClusterIP` cannot be reallocated on a Service that already exists. If you want to change `.spec.ipFamilies`, delete and recreate the Service.
{{< /note >}}
You can set `.spec.ipFamily` to either:
You can set `.spec.ipFamilies` to any of the following array values:
* `IPv4`: The API server will assign an IP from a `service-cluster-ip-range` that is `ipv4`
* `IPv6`: The API server will assign an IP from a `service-cluster-ip-range` that is `ipv6`
- `["IPv4"]`
- `["IPv6"]`
- `["IPv4","IPv6"]` (dual stack)
- `["IPv6","IPv4"]` (dual stack)
The following Service specification does not include the `ipFamily` field. Kubernetes will assign an IP address (also known as a "cluster IP") from the first configured `service-cluster-ip-range` to this Service.
The first family you list is used for the legacy `.spec.ClusterIP` field.
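For instance, a Service that requests dual-stack addressing with IPv6 as the primary family could be written like this sketch (the name and selector are hypothetical):
```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-dual-stack-service   # hypothetical
spec:
  ipFamilyPolicy: PreferDualStack
  ipFamilies:
  - IPv6
  - IPv4
  selector:
    app: MyApp
  ports:
  - protocol: TCP
    port: 80
```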
### Dual-stack Service configuration scenarios
These examples demonstrate the behavior of various dual-stack Service configuration scenarios.
#### Dual-stack options on new Services
1. This Service specification does not explicitly define `.spec.ipFamilyPolicy`. When you create this Service, Kubernetes assigns a cluster IP for the Service from the first configured `service-cluster-ip-range` and sets the `.spec.ipFamilyPolicy` to `SingleStack`. ([Services without selectors](/docs/concepts/services-networking/service/#services-without-selectors) and [headless Services](/docs/concepts/services-networking/service/#headless-services) with selectors will behave in this same way.)
{{< codenew file="service/networking/dual-stack-default-svc.yaml" >}}
The following Service specification includes the `ipFamily` field. Kubernetes will assign an IPv6 address (also known as a "cluster IP") from the configured `service-cluster-ip-range` to this Service.
1. This Service specification explicitly defines `PreferDualStack` in `.spec.ipFamilyPolicy`. When you create this Service on a dual-stack cluster, Kubernetes assigns both IPv4 and IPv6 addresses for the service. The control plane updates the `.spec` for the Service to record the IP address assignments. The field `.spec.ClusterIPs` is the primary field, and contains both assigned IP addresses; `.spec.ClusterIP` is a secondary field with its value calculated from `.spec.ClusterIPs`.
* For the `.spec.ClusterIP` field, the control plane records the IP address that is from the same address family as the first service cluster IP range.
* On a single-stack cluster, the `.spec.ClusterIPs` and `.spec.ClusterIP` fields both only list one address.
* On a cluster with dual-stack enabled, specifying `RequireDualStack` in `.spec.ipFamilyPolicy` behaves the same as `PreferDualStack`.
{{< codenew file="service/networking/dual-stack-ipv6-svc.yaml" >}}
{{< codenew file="service/networking/dual-stack-preferred-svc.yaml" >}}
For comparison, the following Service specification will be assigned an IPv4 address (also known as a "cluster IP") from the configured `service-cluster-ip-range` to this Service.
1. This Service specification explicitly defines `IPv6` and `IPv4` in `.spec.ipFamilies` as well as defining `PreferDualStack` in `.spec.ipFamilyPolicy`. When Kubernetes assigns an IPv6 and IPv4 address in `.spec.ClusterIPs`, `.spec.ClusterIP` is set to the IPv6 address because that is the first element in the `.spec.ClusterIPs` array, overriding the default.
{{< codenew file="service/networking/dual-stack-ipv4-svc.yaml" >}}
{{< codenew file="service/networking/dual-stack-preferred-ipfamilies-svc.yaml" >}}
### Type LoadBalancer
#### Dual-stack defaults on existing Services
On cloud providers which support IPv6 enabled external load balancers, setting the `type` field to `LoadBalancer` in addition to setting the `ipFamily` field to `IPv6` provisions a cloud load balancer for your Service.
These examples demonstrate the default behavior when dual-stack is newly enabled on a cluster where Services already exist.
## Egress Traffic
1. When dual-stack is enabled on a cluster, existing Services (whether `IPv4` or `IPv6`) are configured by the control plane to set `.spec.ipFamilyPolicy` to `SingleStack` and set `.spec.ipFamilies` to the address family of the existing Service. The existing Service cluster IP will be stored in `.spec.ClusterIPs`.
The use of publicly routable and non-publicly routable IPv6 address blocks is acceptable provided the underlying {{< glossary_tooltip text="CNI" term_id="cni" >}} provider is able to implement the transport. If you have a Pod that uses non-publicly routable IPv6 and want that Pod to reach off-cluster destinations (eg. the public Internet), you must set up IP masquerading for the egress traffic and any replies. The [ip-masq-agent](https://github.com/kubernetes-sigs/ip-masq-agent) is dual-stack aware, so you can use ip-masq-agent for IP masquerading on dual-stack clusters.
{{< codenew file="service/networking/dual-stack-default-svc.yaml" >}}
## Known Issues
You can validate this behavior by using kubectl to inspect an existing service.
* Kubenet forces IPv4,IPv6 positional reporting of IPs (--cluster-cidr)
```shell
kubectl get svc my-service -o yaml
```
```yaml
apiVersion: v1
kind: Service
metadata:
labels:
app: MyApp
name: my-service
spec:
clusterIP: 10.0.197.123
clusterIPs:
- 10.0.197.123
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: MyApp
type: ClusterIP
status:
loadBalancer: {}
```
1. When dual-stack is enabled on a cluster, existing [headless Services](/docs/concepts/services-networking/service/#headless-services) with selectors are configured by the control plane to set `.spec.ipFamilyPolicy` to `SingleStack` and set `.spec.ipFamilies` to the address family of the first service cluster IP range (configured via the `--service-cluster-ip-range` flag to the kube-controller-manager) even though `.spec.ClusterIP` is set to `None`.
{{< codenew file="service/networking/dual-stack-default-svc.yaml" >}}
You can validate this behavior by using kubectl to inspect an existing headless service with selectors.
```shell
kubectl get svc my-service -o yaml
```
```yaml
apiVersion: v1
kind: Service
metadata:
labels:
app: MyApp
name: my-service
spec:
clusterIP: None
clusterIPs:
- None
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: MyApp
```
#### Switching Services between single-stack and dual-stack
Services can be changed from single-stack to dual-stack and from dual-stack to single-stack.
1. To change a Service from single-stack to dual-stack, change `.spec.ipFamilyPolicy` from `SingleStack` to `PreferDualStack` or `RequireDualStack` as desired. When you change this Service from single-stack to dual-stack, Kubernetes assigns the missing address family so that the Service now has IPv4 and IPv6 addresses.
Edit the Service specification updating the `.spec.ipFamilyPolicy` from `SingleStack` to `PreferDualStack`.
Before:
```yaml
spec:
ipFamilyPolicy: SingleStack
```
After:
```yaml
spec:
ipFamilyPolicy: PreferDualStack
```
1. To change a Service from dual-stack to single-stack, change `.spec.ipFamilyPolicy` from `PreferDualStack` or `RequireDualStack` to `SingleStack`. When you change this Service from dual-stack to single-stack, Kubernetes retains only the first element in the `.spec.ClusterIPs` array, and sets `.spec.ClusterIP` to that IP address and sets `.spec.ipFamilies` to the address family of `.spec.ClusterIPs`.
### Headless Services without selector
For [Headless Services without selectors](/docs/concepts/services-networking/service/#without-selectors) and without `.spec.ipFamilyPolicy` explicitly set, the `.spec.ipFamilyPolicy` field defaults to `RequireDualStack`.
### Service type LoadBalancer
To provision a dual-stack load balancer for your Service:
* Set the `.spec.type` field to `LoadBalancer`
* Set `.spec.ipFamilyPolicy` field to `PreferDualStack` or `RequireDualStack`
{{< note >}}
To use a dual-stack `LoadBalancer` type Service, your cloud provider must support IPv4 and IPv6 load balancers.
{{< /note >}}
## Egress traffic
If you want to enable egress traffic in order to reach off-cluster destinations (e.g. the public Internet) from a Pod that uses non-publicly routable IPv6 addresses, you need to enable the Pod to use a publicly routed IPv6 address via a mechanism such as transparent proxying or IP masquerading. The [ip-masq-agent](https://github.com/kubernetes-sigs/ip-masq-agent) project supports IP masquerading on dual-stack clusters.
{{< note >}}
Ensure your {{< glossary_tooltip text="CNI" term_id="cni" >}} provider supports IPv6.
{{< /note >}}
## {{% heading "whatsnext" %}}
* [Validate IPv4/IPv6 dual-stack](/docs/tasks/network/validate-dual-stack) networking


@ -44,7 +44,7 @@ for any Kubernetes Service that has a {{< glossary_tooltip text="selector"
term_id="selector" >}} specified. These EndpointSlices include
references to all the Pods that match the Service selector. EndpointSlices group
network endpoints together by unique combinations of protocol, port number, and
Service name.
Service name.
The name of an EndpointSlice object must be a valid
[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names).
@ -93,8 +93,58 @@ EndpointSlices support three address types:
* IPv6
* FQDN (Fully Qualified Domain Name)
### Conditions
The EndpointSlice API stores conditions about endpoints that may be useful for consumers.
The three conditions are `ready`, `serving`, and `terminating`.
#### Ready
`ready` is a condition that maps to a Pod's `Ready` condition. A running Pod with the `Ready`
condition set to `True` should have this EndpointSlice condition also set to `true`. For
compatibility reasons, `ready` is NEVER `true` when a Pod is terminating. Consumers should refer
to the `serving` condition to inspect the readiness of terminating Pods. The only exception to
this rule is for Services with `spec.publishNotReadyAddresses` set to `true`. Endpoints for these
Services will always have the `ready` condition set to `true`.
#### Serving
{{< feature-state for_k8s_version="v1.20" state="alpha" >}}
`serving` is identical to the `ready` condition, except it does not account for terminating states.
Consumers of the EndpointSlice API should check this condition if they care about pod readiness while
the pod is also terminating.
{{< note >}}
Although `serving` is almost identical to `ready`, it was added to avoid breaking the existing meaning
of `ready`. It may be unexpected for existing clients if `ready` could be `true` for terminating
endpoints, since historically terminating endpoints were never included in the Endpoints or
EndpointSlice API to begin with. For this reason, `ready` is _always_ `false` for terminating
endpoints, and a new condition `serving` was added in v1.20 so that clients can track readiness
for terminating pods independent of the existing semantics for `ready`.
{{< /note >}}
#### Terminating
{{< feature-state for_k8s_version="v1.20" state="alpha" >}}
`terminating` is a condition that indicates whether an endpoint is terminating.
For pods, this is any pod that has a deletion timestamp set.
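Putting the three conditions together, an endpoint for a terminating but still-ready Pod might look like the following sketch (names and the address are hypothetical; `serving` and `terminating` only appear when the relevant feature gate is enabled):
```yaml
apiVersion: discovery.k8s.io/v1beta1
kind: EndpointSlice
metadata:
  name: example-abc   # hypothetical
  labels:
    kubernetes.io/service-name: example
addressType: IPv4
ports:
- name: http
  protocol: TCP
  port: 80
endpoints:
- addresses:
  - "10.1.2.3"
  conditions:
    ready: false       # never true while the Pod is terminating
    serving: true      # the Pod still passes its readiness checks
    terminating: true  # the Pod has a deletion timestamp
```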
### Topology information {#topology}
{{< feature-state for_k8s_version="v1.20" state="deprecated" >}}
{{< note >}}
The topology field in EndpointSlices has been deprecated and will be removed in
a future release. A new `nodeName` field will be used instead of setting
`kubernetes.io/hostname` in topology. It was determined that other topology
fields covering zone and region would be better represented as EndpointSlice
labels that would apply to all endpoints within the EndpointSlice.
{{< /note >}}
Each endpoint within an EndpointSlice can contain relevant topology information.
This is used to indicate where an endpoint is, containing information about the
corresponding Node, zone, and region. When the values are available, the


@ -203,10 +203,15 @@ described in detail in [EndpointSlices](/docs/concepts/services-networking/endpo
### Application protocol
{{< feature-state for_k8s_version="v1.19" state="beta" >}}
{{< feature-state for_k8s_version="v1.20" state="stable" >}}
The `AppProtocol` field provides a way to specify an application protocol for each Service port.
The value of this field is mirrored by corresponding Endpoints and EndpointSlice resources.
The `appProtocol` field provides a way to specify an application protocol for
each Service port. The value of this field is mirrored by the corresponding
Endpoints and EndpointSlice objects.
This field follows standard Kubernetes label syntax. Values should either be
[IANA standard service names](http://www.iana.org/assignments/service-names) or
domain prefixed names such as `mycompany.com/my-custom-protocol`.
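For example, a Service port that declares its application protocol could look like this sketch (the names are hypothetical):
```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service   # hypothetical
spec:
  selector:
    app: MyApp
  ports:
  - name: https
    protocol: TCP
    appProtocol: https   # mirrored into Endpoints and EndpointSlices
    port: 443
    targetPort: 9377
```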
## Virtual IPs and service proxies
@ -578,21 +583,12 @@ status:
Traffic from the external load balancer is directed at the backend Pods. The cloud provider decides how it is load balanced.
For LoadBalancer type of Services, when there is more than one port defined, all
ports must have the same protocol and the protocol must be one of `TCP`, `UDP`,
and `SCTP`.
Some cloud providers allow you to specify the `loadBalancerIP`. In those cases, the load-balancer is created
with the user-specified `loadBalancerIP`. If the `loadBalancerIP` field is not specified,
the loadBalancer is set up with an ephemeral IP address. If you specify a `loadBalancerIP`
but your cloud provider does not support the feature, the `loadbalancerIP` field that you
set is ignored.
{{< note >}}
If you're using SCTP, see the [caveat](#caveat-sctp-loadbalancer-service-type) below about the
`LoadBalancer` Service type.
{{< /note >}}
{{< note >}}
On **Azure**, if you want to use a user-specified public type `loadBalancerIP`, you first need
@ -604,6 +600,34 @@ Specify the assigned IP address as loadBalancerIP. Ensure that you have updated
{{< /note >}}
#### Load balancers with mixed protocol types
{{< feature-state for_k8s_version="v1.20" state="alpha" >}}
By default, for LoadBalancer type of Services, when there is more than one port defined, all
ports must have the same protocol, and the protocol must be one which is supported
by the cloud provider.
If the `MixedProtocolLBService` feature gate is enabled for the kube-apiserver, you can use different protocols when there is more than one port defined.
{{< note >}}
The set of protocols that can be used for LoadBalancer type of Services is still defined by the cloud provider.
{{< /note >}}
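With the feature gate enabled, a Service like the following sketch becomes possible (the names are hypothetical; whether the load balancer actually supports both protocols still depends on your cloud provider):
```yaml
apiVersion: v1
kind: Service
metadata:
  name: mixed-protocol-example   # hypothetical
spec:
  type: LoadBalancer
  selector:
    app: MyDnsApp
  ports:
  - name: dns-tcp
    protocol: TCP
    port: 53
  - name: dns-udp
    protocol: UDP
    port: 53
```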
#### Disabling load balancer NodePort allocation {#load-balancer-nodeport-allocation}
{{< feature-state for_k8s_version="v1.20" state="alpha" >}}
Starting in v1.20, you can optionally disable node port allocation for a Service Type=LoadBalancer by setting
the field `spec.allocateLoadBalancerNodePorts` to `false`. This should only be used for load balancer implementations
that route traffic directly to pods as opposed to using node ports. By default, `spec.allocateLoadBalancerNodePorts`
is `true` and type LoadBalancer Services will continue to allocate node ports. If `spec.allocateLoadBalancerNodePorts`
is set to `false` on an existing Service with allocated node ports, those node ports will NOT be de-allocated automatically.
You must explicitly remove the `nodePorts` entry in every Service port to de-allocate those node ports.
You must enable the `ServiceLBNodePortControl` feature gate to use this field.
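A sketch of a Service that opts out of node port allocation (assuming the `ServiceLBNodePortControl` feature gate is enabled and the load balancer routes directly to Pods):
```yaml
apiVersion: v1
kind: Service
metadata:
  name: direct-lb-example   # hypothetical
spec:
  type: LoadBalancer
  allocateLoadBalancerNodePorts: false   # no node ports are allocated
  selector:
    app: MyApp
  ports:
  - protocol: TCP
    port: 80
    targetPort: 8080
```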
#### Internal load balancer
In a mixed environment it is sometimes necessary to route traffic from Services inside the same
@ -1184,6 +1208,36 @@ You can use TCP for any kind of Service, and it's the default network protocol.
You can use UDP for most Services. For type=LoadBalancer Services, UDP support
depends on the cloud provider offering this facility.
### SCTP
{{< feature-state for_k8s_version="v1.20" state="stable" >}}
When using a network plugin that supports SCTP traffic, you can use SCTP for
most Services. For type=LoadBalancer Services, SCTP support depends on the cloud
provider offering this facility. (Most do not).
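A Service using SCTP looks the same as any other Service apart from the protocol (a sketch with hypothetical names; your network plugin must support SCTP):
```yaml
apiVersion: v1
kind: Service
metadata:
  name: sctp-example   # hypothetical
spec:
  selector:
    app: MySctpApp
  ports:
  - protocol: SCTP
    port: 9376
    targetPort: 9376
```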
#### Warnings {#caveat-sctp-overview}
##### Support for multihomed SCTP associations {#caveat-sctp-multihomed}
{{< warning >}}
The support of multihomed SCTP associations requires that the CNI plugin can support the assignment of multiple interfaces and IP addresses to a Pod.
NAT for multihomed SCTP associations requires special logic in the corresponding kernel modules.
{{< /warning >}}
##### Windows {#caveat-sctp-windows-os}
{{< note >}}
SCTP is not supported on Windows based nodes.
{{< /note >}}
##### Userspace kube-proxy {#caveat-sctp-kube-proxy-userspace}
{{< warning >}}
The kube-proxy does not support the management of SCTP associations when it is in userspace mode.
{{< /warning >}}
### HTTP
If your cloud provider supports it, you can use a Service in LoadBalancer mode
@ -1211,42 +1265,6 @@ PROXY TCP4 192.0.2.202 10.0.42.7 12345 7\r\n
followed by the data from the client.
### SCTP
{{< feature-state for_k8s_version="v1.19" state="beta" >}}
Kubernetes supports SCTP as a `protocol` value in Service, Endpoints, EndpointSlice, NetworkPolicy and Pod definitions. As a beta feature, this is enabled by default. To disable SCTP at a cluster level, you (or your cluster administrator) will need to disable the `SCTPSupport` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) for the API server with `--feature-gates=SCTPSupport=false,…`.
When the feature gate is enabled, you can set the `protocol` field of a Service, Endpoints, EndpointSlice, NetworkPolicy or Pod to `SCTP`. Kubernetes sets up the network accordingly for the SCTP associations, just like it does for TCP connections.
#### Warnings {#caveat-sctp-overview}
##### Support for multihomed SCTP associations {#caveat-sctp-multihomed}
{{< warning >}}
The support of multihomed SCTP associations requires that the CNI plugin can support the assignment of multiple interfaces and IP addresses to a Pod.
NAT for multihomed SCTP associations requires special logic in the corresponding kernel modules.
{{< /warning >}}
##### Service with type=LoadBalancer {#caveat-sctp-loadbalancer-service-type}
{{< warning >}}
You can only create a Service with `type` LoadBalancer plus `protocol` SCTP if the cloud provider's load balancer implementation supports SCTP as a protocol. Otherwise, the Service creation request is rejected. The current set of cloud load balancer providers (Azure, AWS, CloudStack, GCE, OpenStack) all lack support for SCTP.
{{< /warning >}}
##### Windows {#caveat-sctp-windows-os}
{{< warning >}}
SCTP is not supported on Windows based nodes.
{{< /warning >}}
##### Userspace kube-proxy {#caveat-sctp-kube-proxy-userspace}
{{< warning >}}
The kube-proxy does not support the management of SCTP associations when it is in userspace mode.
{{< /warning >}}
## {{% heading "whatsnext" %}}
* Read [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/)


@ -732,12 +732,10 @@ Only statically provisioned volumes are supported for alpha release. Administrat
## Volume Snapshot and Restore Volume from Snapshot Support
{{< feature-state for_k8s_version="v1.17" state="beta" >}}
{{< feature-state for_k8s_version="v1.20" state="stable" >}}
Volume snapshot feature was added to support CSI Volume Plugins only. For details, see [volume snapshots](/docs/concepts/storage/volume-snapshots/).
To enable support for restoring a volume from a volume snapshot data source, enable the
`VolumeSnapshotDataSource` feature gate on the apiserver and controller-manager.
Volume snapshots only support the out-of-tree CSI volume plugins. For details, see [Volume Snapshots](/docs/concepts/storage/volume-snapshots/).
In-tree volume plugins are deprecated. You can read about the deprecated volume plugins in the [Volume Plugin FAQ](https://github.com/kubernetes/community/blob/master/sig-storage/volume-plugin-faq.md).
### Create a PersistentVolumeClaim from a Volume Snapshot {#create-persistent-volume-claim-from-volume-snapshot}


@ -40,7 +40,7 @@ of a class when first creating VolumeSnapshotClass objects, and the objects cann
be updated once they are created.
```yaml
apiVersion: snapshot.storage.k8s.io/v1beta1
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
name: csi-hostpath-snapclass
@ -54,7 +54,7 @@ that don't request any particular class to bind to by adding the
`snapshot.storage.kubernetes.io/is-default-class: "true"` annotation:
```yaml
apiVersion: snapshot.storage.k8s.io/v1beta1
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
name: csi-hostpath-snapclass


@ -13,7 +13,6 @@ weight: 20
<!-- overview -->
{{< feature-state for_k8s_version="v1.17" state="beta" >}}
In Kubernetes, a _VolumeSnapshot_ represents a snapshot of a volume on a storage system. This document assumes that you are already familiar with Kubernetes [persistent volumes](/docs/concepts/storage/persistent-volumes/).
@ -37,7 +36,8 @@ Users need to be aware of the following when using this feature:
* API Objects `VolumeSnapshot`, `VolumeSnapshotContent`, and `VolumeSnapshotClass` are {{< glossary_tooltip term_id="CustomResourceDefinition" text="CRDs" >}}, not part of the core API.
* `VolumeSnapshot` support is only available for CSI drivers.
* As part of the deployment process in the beta version of `VolumeSnapshot`, the Kubernetes team provides a snapshot controller to be deployed into the control plane, and a sidecar helper container called csi-snapshotter to be deployed together with the CSI driver. The snapshot controller watches `VolumeSnapshot` and `VolumeSnapshotContent` objects and is responsible for the creation and deletion of `VolumeSnapshotContent` object in dynamic provisioning. The sidecar csi-snapshotter watches `VolumeSnapshotContent` objects and triggers `CreateSnapshot` and `DeleteSnapshot` operations against a CSI endpoint.
* As part of the deployment process of `VolumeSnapshot`, the Kubernetes team provides a snapshot controller to be deployed into the control plane, and a sidecar helper container called csi-snapshotter to be deployed together with the CSI driver. The snapshot controller watches `VolumeSnapshot` and `VolumeSnapshotContent` objects and is responsible for the creation and deletion of `VolumeSnapshotContent` object. The sidecar csi-snapshotter watches `VolumeSnapshotContent` objects and triggers `CreateSnapshot` and `DeleteSnapshot` operations against a CSI endpoint.
* There is also a validating webhook server which provides tightened validation on snapshot objects. This should be installed by the Kubernetes distros along with the snapshot controller and CRDs, not CSI drivers. It should be installed in all Kubernetes clusters that have the snapshot feature enabled.
* CSI drivers may or may not have implemented the volume snapshot functionality. The CSI drivers that have provided support for volume snapshot will likely use the csi-snapshotter. See [CSI Driver documentation](https://kubernetes-csi.github.io/docs/) for details.
* The CRDs and snapshot controller installations are the responsibility of the Kubernetes distribution.
@ -78,7 +78,7 @@ Deletion is triggered by deleting the `VolumeSnapshot` object, and the `Deletion
Each VolumeSnapshot contains a spec and a status.
```yaml
apiVersion: snapshot.storage.k8s.io/v1beta1
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
name: new-snapshot-test
@ -97,7 +97,7 @@ using the attribute `volumeSnapshotClassName`. If nothing is set, then the defau
For pre-provisioned snapshots, you need to specify a `volumeSnapshotContentName` as the source for the snapshot as shown in the following example. The `volumeSnapshotContentName` source field is required for pre-provisioned snapshots.
```yaml
apiVersion: snapshot.storage.k8s.io/v1beta1
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
name: test-snapshot
@ -111,7 +111,7 @@ spec:
Each VolumeSnapshotContent contains a spec and status. In dynamic provisioning, the snapshot common controller creates `VolumeSnapshotContent` objects. Here is an example:
```yaml
apiVersion: snapshot.storage.k8s.io/v1beta1
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotContent
metadata:
name: snapcontent-72d9a349-aacd-42d2-a240-d775650d2455
@ -132,7 +132,7 @@ spec:
For pre-provisioned snapshots, you (as cluster administrator) are responsible for creating the `VolumeSnapshotContent` object as follows.
```yaml
apiVersion: snapshot.storage.k8s.io/v1beta1
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotContent
metadata:
name: new-snapshot-content-test
@ -154,4 +154,4 @@ You can provision a new volume, pre-populated with data from a snapshot, by usin
the *dataSource* field in the `PersistentVolumeClaim` object.
For more details, see
[Volume Snapshot and Restore Volume from Snapshot](/docs/concepts/storage/persistent-volumes/#volume-snapshot-and-restore-volume-from-snapshot-support).
[Volume Snapshot and Restore Volume from Snapshot](/docs/concepts/storage/persistent-volumes/#volume-snapshot-and-restore-volume-from-snapshot-support).


@ -303,6 +303,12 @@ While tmpfs is very fast, be aware that unlike disks, tmpfs is cleared on
node reboot and any files you write count against your container's
memory limit.
{{< note >}}
If the `SizeMemoryBackedVolumes` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled,
you can specify a size for memory backed volumes. If no size is specified, memory
backed volumes are sized to 50% of the memory on a Linux host.
{{< /note >}}
#### emptyDir configuration example
```yaml


@ -32,8 +32,6 @@ The name must be no longer than 52 characters. This is because the CronJob contr
append 11 characters to the provided job name, and the maximum length of a Job name
is 63 characters.
<!-- body -->
## CronJob
@ -82,6 +80,14 @@ be down for the same period as the previous example (`08:29:00` to `10:21:00`,)
The CronJob is only responsible for creating Jobs that match its schedule, and
the Job in turn is responsible for the management of the Pods it represents.
## New controller
There's an alternative implementation of the CronJob controller, available as an alpha feature since Kubernetes 1.20. To select version 2 of the CronJob controller, pass the following [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) flag to the {{< glossary_tooltip term_id="kube-controller-manager" text="kube-controller-manager" >}}.
```
--feature-gates="CronJobControllerV2=true"
```
## {{% heading "whatsnext" %}}

View File

@ -59,11 +59,22 @@ metadata:
```
{{< note >}}
Cross-namespace owner references are disallowed by design. This means:
1) Namespace-scoped dependents can only specify owners in the same namespace,
and owners that are cluster-scoped.
2) Cluster-scoped dependents can only specify cluster-scoped owners, but not
namespace-scoped owners.
Cross-namespace owner references are disallowed by design.
Namespaced dependents can specify cluster-scoped or namespaced owners.
A namespaced owner **must** exist in the same namespace as the dependent.
If it does not, the owner reference is treated as absent, and the dependent
is subject to deletion once all owners are verified absent.
Cluster-scoped dependents can only specify cluster-scoped owners.
In v1.20+, if a cluster-scoped dependent specifies a namespaced kind as an owner,
it is treated as having an unresolvable owner reference, and cannot be garbage collected.
In v1.20+, if the garbage collector detects an invalid cross-namespace `ownerReference`,
or a cluster-scoped dependent with an `ownerReference` referencing a namespaced kind, a warning Event
with a reason of `OwnerRefInvalidNamespace` and an `involvedObject` of the invalid dependent is reported.
You can check for that kind of Event by running
`kubectl get events -A --field-selector=reason=OwnerRefInvalidNamespace`.
{{< /note >}}
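For example, a valid owner reference on a namespaced dependent keeps the owner in the same namespace (the names and UID below are illustrative):
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config                # illustrative namespaced dependent
  namespace: demo
  ownerReferences:
  - apiVersion: apps/v1
    kind: Deployment              # namespaced owner; must exist in "demo" as well
    name: my-app                  # illustrative owner name
    uid: d9607e19-f88f-11e6-a518-42010a800195   # illustrative UID of the owner object
```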
## Controlling how the garbage collector deletes dependents

View File

@ -257,7 +257,6 @@ but cannot be controlled from there.
## {{% heading "whatsnext" %}}
* Learn about the [lifecycle of a Pod](/docs/concepts/workloads/pods/pod-lifecycle/).
* Learn about [PodPresets](/docs/concepts/workloads/pods/podpreset/).
* Learn about [RuntimeClass](/docs/concepts/containers/runtime-class/) and how you can use it to
configure different Pods with different container runtime configurations.
* Read about [Pod topology spread constraints](/docs/concepts/workloads/pods/pod-topology-spread-constraints/).

View File

@ -90,7 +90,7 @@ enabled, and Kubernetes client and server version v1.16 or later.
{{< /note >}}
The examples in this section demonstrate how ephemeral containers appear in
the API. You would normally use `kubectl alpha debug` or another `kubectl`
the API. You would normally use `kubectl debug` or another `kubectl`
[plugin](/docs/tasks/extend-kubectl/kubectl-plugins/) to automate these steps
rather than invoking the API directly.
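For example, a typical invocation looks roughly like the following; the Pod name, image, and target container are placeholders:
```shell
# Add an interactive ephemeral debugging container to a running Pod
kubectl debug -it my-app-pod --image=busybox:1.28 --target=my-app-container
```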

View File

@ -315,7 +315,7 @@ to stop.
### When should you use a startup probe?
{{< feature-state for_k8s_version="v1.18" state="beta" >}}
{{< feature-state for_k8s_version="v1.20" state="stable" >}}
Startup probes are useful for Pods that have containers that take a long time to
come into service. Rather than set a long liveness interval, you can configure

View File

@ -310,6 +310,7 @@ profiles:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
defaultingType: List
```
{{< note >}}
@ -322,9 +323,9 @@ using default constraints for `PodTopologySpread`.
#### Internal default constraints
{{< feature-state for_k8s_version="v1.19" state="alpha" >}}
{{< feature-state for_k8s_version="v1.20" state="beta" >}}
When you enable the `DefaultPodTopologySpread` feature gate, the
With the `DefaultPodTopologySpread` feature gate (enabled by default), the
legacy `SelectorSpread` plugin is disabled.
kube-scheduler uses the following default topology constraints for the
`PodTopologySpread` plugin configuration:
@ -351,6 +352,22 @@ The `PodTopologySpread` plugin does not score the nodes that don't have
the topology keys specified in the spreading constraints.
{{< /note >}}
If you don't want to use the default Pod spreading constraints for your cluster,
you can disable those defaults by setting `defaultingType` to `List` and leaving
`defaultConstraints` empty in the `PodTopologySpread` plugin configuration:
```yaml
apiVersion: kubescheduler.config.k8s.io/v1beta1
kind: KubeSchedulerConfiguration
profiles:
- pluginConfig:
- name: PodTopologySpread
args:
defaultConstraints: []
defaultingType: List
```
## Comparison with PodAffinity/PodAntiAffinity
In Kubernetes, directives related to "Affinity" control how Pods are

View File

@ -1,91 +0,0 @@
---
reviewers:
- jessfraz
title: Pod Presets
content_type: concept
weight: 50
---
<!-- overview -->
{{< feature-state for_k8s_version="v1.6" state="alpha" >}}
This page provides an overview of PodPresets, which are objects for injecting
certain information into pods at creation time. The information can include
secrets, volumes, volume mounts, and environment variables.
<!-- body -->
## Understanding Pod presets
A PodPreset is an API resource for injecting additional runtime requirements
into a Pod at creation time.
You use [label selectors](/docs/concepts/overview/working-with-objects/labels/#label-selectors)
to specify the Pods to which a given PodPreset applies.
Using a PodPreset allows pod template authors to not have to explicitly provide
all information for every pod. This way, authors of pod templates consuming a
specific service do not need to know all the details about that service.
## Enable PodPreset in your cluster {#enable-pod-preset}
In order to use Pod presets in your cluster you must ensure the following:
1. You have enabled the API type `settings.k8s.io/v1alpha1/podpreset`. For
example, this can be done by including `settings.k8s.io/v1alpha1=true` in
the `--runtime-config` option for the API server. In minikube add this flag
`--extra-config=apiserver.runtime-config=settings.k8s.io/v1alpha1=true` while
starting the cluster.
1. You have enabled the admission controller named `PodPreset`. One way to do this
is to include `PodPreset` in the `--enable-admission-plugins` option value specified
for the API server. For example, if you use Minikube, add this flag:
```shell
--extra-config=apiserver.enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,PodPreset
```
while starting your cluster.
## How it works
Kubernetes provides an admission controller (`PodPreset`) which, when enabled,
applies Pod Presets to incoming pod creation requests.
When a pod creation request occurs, the system does the following:
1. Retrieve all `PodPresets` available for use.
1. Check if the label selectors of any `PodPreset` matches the labels on the
pod being created.
1. Attempt to merge the various resources defined by the `PodPreset` into the
Pod being created.
1. On error, throw an event documenting the merge error on the pod, and create
the pod _without_ any injected resources from the `PodPreset`.
1. Annotate the resulting modified Pod spec to indicate that it has been
modified by a `PodPreset`. The annotation is of the form
`podpreset.admission.kubernetes.io/podpreset-<pod-preset name>: "<resource version>"`.
Each Pod can be matched by zero or more PodPresets; and each PodPreset can be
applied to zero or more Pods. When a PodPreset is applied to one or more
Pods, Kubernetes modifies the Pod Spec. For changes to `env`, `envFrom`, and
`volumeMounts`, Kubernetes modifies the container spec for all containers in
the Pod; for changes to `volumes`, Kubernetes modifies the Pod Spec.
{{< note >}}
A Pod Preset is capable of modifying the following fields in a Pod spec when appropriate:
- The `.spec.containers` field
- The `.spec.initContainers` field
{{< /note >}}
### Disable Pod Preset for a specific pod
There may be instances where you wish for a Pod to not be altered by any Pod
preset mutations. In these cases, you can add an annotation in the Pod's `.spec`
of the form: `podpreset.admission.kubernetes.io/exclude: "true"`.
## {{% heading "whatsnext" %}}
See [Injecting data into a Pod using PodPreset](/docs/tasks/inject-data-application/podpreset/)
For more information about the background, see the [design proposal for PodPreset](https://git.k8s.io/community/contributors/design-proposals/service-catalog/pod-preset.md).

View File

@ -725,10 +725,15 @@ See the [resourceQuota design doc](https://git.k8s.io/community/contributors/des
### RuntimeClass {#runtimeclass}
{{< feature-state for_k8s_version="v1.16" state="alpha" >}}
{{< feature-state for_k8s_version="v1.20" state="stable" >}}
For [RuntimeClass](/docs/concepts/containers/runtime-class/) definitions which describe an overhead associated with running a pod,
this admission controller will set the pod.Spec.Overhead field accordingly.
If you enable the `PodOverhead` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/), and define a RuntimeClass with [Pod overhead](/docs/concepts/scheduling-eviction/pod-overhead/) configured, this admission controller checks incoming
Pods and rejects any Pod create request that already has the overhead set.
For Pods that have a RuntimeClass configured and selected in their `.spec`, this admission controller sets `.spec.overhead` in the Pod based on the value defined in the corresponding RuntimeClass.
{{< note >}}
The `.spec.overhead` field for Pod and the `.overhead` field for RuntimeClass are both in beta. If you do not enable the `PodOverhead` feature gate, all Pods are treated as if `.spec.overhead` is unset.
{{< /note >}}
See also [Pod Overhead](/docs/concepts/scheduling-eviction/pod-overhead/)
for more information.
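For example, a minimal sketch of a RuntimeClass that declares a Pod overhead (the class name, handler, and overhead values are illustrative):
```yaml
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: kata-fc        # illustrative name
handler: kata-fc       # illustrative CRI handler configured on the nodes
overhead:
  podFixed:
    memory: "120Mi"
    cpu: "250m"
```
A Pod that selects this class via `runtimeClassName: kata-fc` in its `.spec` then has `.spec.overhead` populated by this admission controller.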

View File

@ -908,11 +908,22 @@ users:
On Fedora: dnf install example-client-go-exec-plugin
...
# Whether or not to provide cluster information, which could potentially contain
# very large CA data, to this exec plugin as a part of the KUBERNETES_EXEC_INFO
# environment variable.
provideClusterInfo: true
clusters:
- name: my-cluster
cluster:
server: "https://172.17.4.100:6443"
certificate-authority: "/etc/kubernetes/ca.pem"
extensions:
- name: client.authentication.k8s.io/exec # reserved extension name for per cluster exec config
extension:
arbitrary: config
this: can be provided via the KUBERNETES_EXEC_INFO environment variable upon setting provideClusterInfo
you: ["can", "put", "anything", "here"]
contexts:
- name: my-cluster
context:
@ -994,3 +1005,28 @@ RFC3339 timestamp. Presence or absence of an expiry has the following impact:
}
```
The plugin can optionally be called with an environment variable, `KUBERNETES_EXEC_INFO`,
that contains information about the cluster for which this plugin is obtaining
credentials. This information can be used to perform cluster-specific credential
acquisition logic. In order to enable this behavior, the `provideClusterInfo` field must
be set on the exec user field in the
[kubeconfig](/docs/concepts/configuration/organize-cluster-access-kubeconfig/). Here is an
example of the aforementioned `KUBERNETES_EXEC_INFO` environment variable.
```json
{
"apiVersion": "client.authentication.k8s.io/v1beta1",
"kind": "ExecCredential",
"spec": {
"cluster": {
"server": "https://172.17.4.100:6443",
"certificate-authority-data": "LS0t...",
"config": {
"arbitrary": "config",
"this": "can be provided via the KUBERNETES_EXEC_INFO environment variable upon setting provideClusterInfo",
"you": ["can", "put", "anything", "here"]
}
}
}
}
```

View File

@ -801,7 +801,12 @@ This is commonly used by add-on API servers for unified authentication and autho
<td>None</td>
<td>Allows access to the resources required by most <a href="/docs/concepts/storage/persistent-volumes/#provisioner">dynamic volume provisioners</a>.</td>
</tr>
<tbody>
<tr>
<td><b>system:monitoring</b></td>
<td><b>system:monitoring</b> group</td>
<td>Allows read access to control-plane monitoring endpoints (i.e. {{< glossary_tooltip term_id="kube-apiserver" text="kube-apiserver" >}} liveness and readiness endpoints (<tt>/healthz</tt>, <tt>/livez</tt>, <tt>/readyz</tt>), the individual health-check endpoints (<tt>/healthz/*</tt>, <tt>/livez/*</tt>, <tt>/readyz/*</tt>), and <tt>/metrics</tt>). Note that individual health check endpoints and the metric endpoint may expose sensitive information.</td>
</tr>
</tbody>
</table>
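For example, a minimal sketch of a ClusterRoleBinding that grants these monitoring permissions to an additional group (the binding and group names are illustrative):
```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: external-health-probes   # illustrative name
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:monitoring
subjects:
- kind: Group
  name: health-probes            # illustrative group
  apiGroup: rbac.authorization.k8s.io
```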
### Roles for built-in controllers {#controller-roles}

View File

@ -10,7 +10,7 @@ weight: 50
---
<!-- overview -->
This is a Cluster Administrator guide to service accounts. You should be familiar with
This is a Cluster Administrator guide to service accounts. You should be familiar with
[configuring Kubernetes service accounts](/docs/tasks/configure-pod-container/configure-service-account/).
Support for authorization and user accounts is planned but incomplete. Sometimes
@ -53,6 +53,19 @@ It is part of the API server.
It acts synchronously to modify pods as they are created or updated. When this plugin is active
(and it is by default on most distributions), it does the following when a pod is created or modified:
1. If the pod does not have a `ServiceAccount` set, it sets the `ServiceAccount` to `default`.
1. It ensures that the `ServiceAccount` referenced by the pod exists, and otherwise rejects it.
1. If the pod does not contain any `ImagePullSecrets`, then `ImagePullSecrets` of the `ServiceAccount` are added to the pod.
1. It adds a `volume` to the pod which contains a token for API access.
1. It adds a `volumeSource` to each container of the pod mounted at `/var/run/secrets/kubernetes.io/serviceaccount`.
#### Bound Service Account Token Volume
{{< feature-state for_k8s_version="v1.13" state="alpha" >}}
When the `BoundServiceAccountTokenVolume` feature gate is enabled, the service account admission controller will
add a projected service account token volume instead of a secret volume. The service account token expires after 1 hour by default, or when the pod is deleted. See more details about [projected volumes](/docs/tasks/configure-pod-container/configure-projected-volume-storage/).
This feature depends on the `RootCAConfigMap` feature gate being enabled, which publishes a "kube-root-ca.crt" ConfigMap to every namespace. This ConfigMap contains a CA bundle used for verifying connections to the kube-apiserver.
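Under this feature gate, the volume injected by the admission controller is roughly the following projected volume; this is a sketch, and the volume name and `expirationSeconds` depend on the cluster configuration:
```yaml
volumes:
- name: kube-api-access          # name chosen by the admission controller
  projected:
    sources:
    - serviceAccountToken:
        path: token
        expirationSeconds: 3600  # roughly 1 hour by default
    - configMap:
        name: kube-root-ca.crt   # published by the RootCAConfigMap feature
        items:
        - key: ca.crt
          path: ca.crt
    - downwardAPI:
        items:
        - path: namespace
          fieldRef:
            fieldPath: metadata.namespace
```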
1. If the pod does not have a `serviceAccountName` set, it sets the
`serviceAccountName` to `default`.
1. It ensures that the `serviceAccountName` referenced by the pod exists, and

File diff suppressed because one or more lines are too long

View File

@ -51,8 +51,10 @@ different Kubernetes components.
| `AnyVolumeDataSource` | `false` | Alpha | 1.18 | |
| `APIListChunking` | `false` | Alpha | 1.8 | 1.8 |
| `APIListChunking` | `true` | Beta | 1.9 | |
| `APIPriorityAndFairness` | `false` | Alpha | 1.17 | |
| `APIPriorityAndFairness` | `false` | Alpha | 1.17 | 1.19 |
| `APIPriorityAndFairness` | `true` | Beta | 1.20 | |
| `APIResponseCompression` | `false` | Alpha | 1.7 | |
| `APIServerIdentity` | `false` | Alpha | 1.20 | |
| `AppArmor` | `true` | Beta | 1.4 | |
| `BalanceAttachedNodeVolumes` | `false` | Alpha | 1.11 | |
| `BoundServiceAccountTokenVolume` | `false` | Alpha | 1.13 | |
@ -79,14 +81,23 @@ different Kubernetes components.
| `CSIMigrationOpenStackComplete` | `false` | Alpha | 1.17 | |
| `CSIMigrationvSphere` | `false` | Beta | 1.19 | |
| `CSIMigrationvSphereComplete` | `false` | Beta | 1.19 | |
| `CSIServiceAccountToken` | `false` | Alpha | 1.20 | |
| `CSIStorageCapacity` | `false` | Alpha | 1.19 | |
| `CSIVolumeFSGroupPolicy` | `false` | Alpha | 1.19 | |
| `ConfigurableFSGroupPolicy` | `false` | Alpha | 1.18 | |
| `CSIVolumeFSGroupPolicy` | `false` | Alpha | 1.19 | 1.19 |
| `CSIVolumeFSGroupPolicy` | `true` | Beta | 1.20 | |
| `ConfigurableFSGroupPolicy` | `false` | Alpha | 1.18 | 1.19 |
| `ConfigurableFSGroupPolicy` | `true` | Beta | 1.20 | |
| `CronJobControllerV2` | `false` | Alpha | 1.20 | |
| `CustomCPUCFSQuotaPeriod` | `false` | Alpha | 1.12 | |
| `DefaultPodTopologySpread` | `false` | Alpha | 1.19 | |
| `CustomResourceDefaulting` | `false` | Alpha| 1.15 | 1.15 |
| `CustomResourceDefaulting` | `true` | Beta | 1.16 | |
| `DefaultPodTopologySpread` | `false` | Alpha | 1.19 | 1.19 |
| `DefaultPodTopologySpread` | `true` | Beta | 1.20 | |
| `DevicePlugins` | `false` | Alpha | 1.8 | 1.9 |
| `DevicePlugins` | `true` | Beta | 1.10 | |
| `DisableAcceleratorUsageMetrics` | `false` | Alpha | 1.19 | 1.20 |
| `DisableAcceleratorUsageMetrics` | `false` | Alpha | 1.19 | 1.19 |
| `DisableAcceleratorUsageMetrics` | `true` | Beta | 1.20 | 1.22 |
| `DownwardAPIHugePages` | `false` | Alpha | 1.20 | |
| `DryRun` | `false` | Alpha | 1.12 | 1.12 |
| `DryRun` | `true` | Beta | 1.13 | |
| `DynamicKubeletConfig` | `false` | Alpha | 1.4 | 1.10 |
@ -94,8 +105,10 @@ different Kubernetes components.
| `EndpointSlice` | `false` | Alpha | 1.16 | 1.16 |
| `EndpointSlice` | `false` | Beta | 1.17 | |
| `EndpointSlice` | `true` | Beta | 1.18 | |
| `EndpointSliceNodeName` | `false` | Alpha | 1.20 | |
| `EndpointSliceProxying` | `false` | Alpha | 1.18 | 1.18 |
| `EndpointSliceProxying` | `true` | Beta | 1.19 | |
| `EndpointSliceTerminating` | `false` | Alpha | 1.20 | |
| `EphemeralContainers` | `false` | Alpha | 1.16 | |
| `ExpandCSIVolumes` | `false` | Alpha | 1.14 | 1.15 |
| `ExpandCSIVolumes` | `true` | Beta | 1.16 | |
@ -105,6 +118,7 @@ different Kubernetes components.
| `ExpandPersistentVolumes` | `true` | Beta | 1.11 | |
| `ExperimentalHostUserNamespaceDefaulting` | `false` | Beta | 1.5 | |
| `GenericEphemeralVolume` | `false` | Alpha | 1.19 | |
| `GracefulNodeShutdown` | `false` | Alpha | 1.20 | |
| `HPAScaleToZero` | `false` | Alpha | 1.16 | |
| `HugePageStorageMediumSize` | `false` | Alpha | 1.18 | 1.18 |
| `HugePageStorageMediumSize` | `true` | Beta | 1.19 | |
@ -112,12 +126,11 @@ different Kubernetes components.
| `ImmutableEphemeralVolumes` | `false` | Alpha | 1.18 | 1.18 |
| `ImmutableEphemeralVolumes` | `true` | Beta | 1.19 | |
| `IPv6DualStack` | `false` | Alpha | 1.16 | |
| `KubeletPodResources` | `false` | Alpha | 1.13 | 1.14 |
| `KubeletPodResources` | `true` | Beta | 1.15 | |
| `LegacyNodeRoleBehavior` | `true` | Alpha | 1.16 | |
| `LocalStorageCapacityIsolation` | `false` | Alpha | 1.7 | 1.9 |
| `LocalStorageCapacityIsolation` | `true` | Beta | 1.10 | |
| `LocalStorageCapacityIsolationFSQuotaMonitoring` | `false` | Alpha | 1.15 | |
| `MixedProtocolLBService` | `false` | Alpha | 1.20 | |
| `MountContainers` | `false` | Alpha | 1.9 | |
| `NodeDisruptionExclusion` | `false` | Alpha | 1.16 | 1.18 |
| `NodeDisruptionExclusion` | `true` | Beta | 1.19 | |
@ -125,10 +138,13 @@ different Kubernetes components.
| `NonPreemptingPriority` | `true` | Beta | 1.19 | |
| `PodDisruptionBudget` | `false` | Alpha | 1.3 | 1.4 |
| `PodDisruptionBudget` | `true` | Beta | 1.5 | |
| `PodOverhead` | `false` | Alpha | 1.16 | - |
| `PodOverhead` | `false` | Alpha | 1.16 | 1.17 |
| `PodOverhead` | `true` | Beta | 1.18 | |
| `ProcMountType` | `false` | Alpha | 1.12 | |
| `QOSReserved` | `false` | Alpha | 1.11 | |
| `RemainingItemCount` | `false` | Alpha | 1.15 | |
| `RootCAConfigMap` | `false` | Alpha | 1.13 | 1.19 |
| `RootCAConfigMap` | `true` | Beta | 1.20 | |
| `RotateKubeletServerCertificate` | `false` | Alpha | 1.7 | 1.11 |
| `RotateKubeletServerCertificate` | `true` | Beta | 1.12 | |
| `RunAsGroup` | `true` | Beta | 1.14 | |
@ -139,31 +155,20 @@ different Kubernetes components.
| `ServerSideApply` | `false` | Alpha | 1.14 | 1.15 |
| `ServerSideApply` | `true` | Beta | 1.16 | |
| `ServiceAccountIssuerDiscovery` | `false` | Alpha | 1.18 | |
| `ServiceAppProtocol` | `false` | Alpha | 1.18 | 1.18 |
| `ServiceAppProtocol` | `true` | Beta | 1.19 | |
| `ServiceLBNodePortControl` | `false` | Alpha | 1.20 | 1.20 |
| `ServiceNodeExclusion` | `false` | Alpha | 1.8 | 1.18 |
| `ServiceNodeExclusion` | `true` | Beta | 1.19 | |
| `ServiceTopology` | `false` | Alpha | 1.17 | |
| `SetHostnameAsFQDN` | `false` | Alpha | 1.19 | |
| `StartupProbe` | `false` | Alpha | 1.16 | 1.17 |
| `StartupProbe` | `true` | Beta | 1.18 | |
| `SizeMemoryBackedVolumes` | `false` | Alpha | 1.20 | |
| `SetHostnameAsFQDN` | `false` | Alpha | 1.19 | 1.19 |
| `SetHostnameAsFQDN` | `true` | Beta | 1.20 | |
| `StorageVersionHash` | `false` | Alpha | 1.14 | 1.14 |
| `StorageVersionHash` | `true` | Beta | 1.15 | |
| `SupportNodePidsLimit` | `false` | Alpha | 1.14 | 1.14 |
| `SupportNodePidsLimit` | `true` | Beta | 1.15 | |
| `SupportPodPidsLimit` | `false` | Alpha | 1.10 | 1.13 |
| `SupportPodPidsLimit` | `true` | Beta | 1.14 | |
| `Sysctls` | `true` | Beta | 1.11 | |
| `TokenRequest` | `false` | Alpha | 1.10 | 1.11 |
| `TokenRequest` | `true` | Beta | 1.12 | |
| `TokenRequestProjection` | `false` | Alpha | 1.11 | 1.11 |
| `TokenRequestProjection` | `true` | Beta | 1.12 | |
| `TTLAfterFinished` | `false` | Alpha | 1.12 | |
| `TopologyManager` | `false` | Alpha | 1.16 | |
| `ValidateProxyRedirects` | `false` | Alpha | 1.12 | 1.13 |
| `ValidateProxyRedirects` | `true` | Beta | 1.14 | |
| `VolumeSnapshotDataSource` | `false` | Alpha | 1.12 | 1.16 |
| `VolumeSnapshotDataSource` | `true` | Beta | 1.17 | - |
| `WindowsEndpointSliceProxying` | `false` | Alpha | 1.19 | |
| `WindowsGMSA` | `false` | Alpha | 1.14 | |
| `WindowsGMSA` | `true` | Beta | 1.16 | |
@ -235,6 +240,7 @@ different Kubernetes components.
| `EvenPodsSpread` | `false` | Alpha | 1.16 | 1.17 |
| `EvenPodsSpread` | `true` | Beta | 1.18 | 1.18 |
| `EvenPodsSpread` | `true` | GA | 1.19 | - |
| `ExecProbeTimeout` | `true` | GA | 1.20 | - |
| `GCERegionalPersistentDisk` | `true` | Beta | 1.10 | 1.12 |
| `GCERegionalPersistentDisk` | `true` | GA | 1.13 | - |
| `HugePages` | `false` | Alpha | 1.8 | 1.9 |
@ -244,9 +250,13 @@ different Kubernetes components.
| `Initializers` | - | Deprecated | 1.14 | - |
| `KubeletConfigFile` | `false` | Alpha | 1.8 | 1.9 |
| `KubeletConfigFile` | - | Deprecated | 1.10 | - |
| `KubeletCredentialProviders` | `false` | Alpha | 1.20 | 1.20 |
| `KubeletPluginsWatcher` | `false` | Alpha | 1.11 | 1.11 |
| `KubeletPluginsWatcher` | `true` | Beta | 1.12 | 1.12 |
| `KubeletPluginsWatcher` | `true` | GA | 1.13 | - |
| `KubeletPodResources` | `false` | Alpha | 1.13 | 1.14 |
| `KubeletPodResources` | `true` | Beta | 1.15 | |
| `KubeletPodResources` | `true` | GA | 1.20 | |
| `MountPropagation` | `false` | Alpha | 1.8 | 1.9 |
| `MountPropagation` | `true` | Beta | 1.10 | 1.11 |
| `MountPropagation` | `true` | GA | 1.12 | - |
@ -275,12 +285,24 @@ different Kubernetes components.
| `ResourceQuotaScopeSelectors` | `true` | GA | 1.17 | - |
| `RotateKubeletClientCertificate` | `true` | Beta | 1.8 | 1.18 |
| `RotateKubeletClientCertificate` | `true` | GA | 1.19 | - |
| `RuntimeClass` | `false` | Alpha | 1.12 | 1.13 |
| `RuntimeClass` | `true` | Beta | 1.14 | 1.19 |
| `RuntimeClass` | `true` | GA | 1.20 | - |
| `ScheduleDaemonSetPods` | `false` | Alpha | 1.11 | 1.11 |
| `ScheduleDaemonSetPods` | `true` | Beta | 1.12 | 1.16 |
| `ScheduleDaemonSetPods` | `true` | GA | 1.17 | - |
| `SCTPSupport` | `false` | Alpha | 1.12 | 1.18 |
| `SCTPSupport` | `true` | Beta | 1.19 | 1.19 |
| `SCTPSupport` | `true` | GA | 1.20 | - |
| `ServiceAppProtocol` | `false` | Alpha | 1.18 | 1.18 |
| `ServiceAppProtocol` | `true` | Beta | 1.19 | |
| `ServiceAppProtocol` | `true` | GA | 1.20 | - |
| `ServiceLoadBalancerFinalizer` | `false` | Alpha | 1.15 | 1.15 |
| `ServiceLoadBalancerFinalizer` | `true` | Beta | 1.16 | 1.16 |
| `ServiceLoadBalancerFinalizer` | `true` | GA | 1.17 | - |
| `StartupProbe` | `false` | Alpha | 1.16 | 1.17 |
| `StartupProbe` | `true` | Beta | 1.18 | 1.19 |
| `StartupProbe` | `true` | GA | 1.20 | - |
| `StorageObjectInUseProtection` | `true` | Beta | 1.10 | 1.10 |
| `StorageObjectInUseProtection` | `true` | GA | 1.11 | - |
| `StreamingProxyRedirects` | `false` | Beta | 1.5 | 1.5 |
@ -290,12 +312,27 @@ different Kubernetes components.
| `SupportIPVSProxyMode` | `false` | Beta | 1.9 | 1.9 |
| `SupportIPVSProxyMode` | `true` | Beta | 1.10 | 1.10 |
| `SupportIPVSProxyMode` | `true` | GA | 1.11 | - |
| `SupportNodePidsLimit` | `false` | Alpha | 1.14 | 1.14 |
| `SupportNodePidsLimit` | `true` | Beta | 1.15 | 1.19 |
| `SupportNodePidsLimit` | `true` | GA | 1.20 | - |
| `SupportPodPidsLimit` | `false` | Alpha | 1.10 | 1.13 |
| `SupportPodPidsLimit` | `true` | Beta | 1.14 | 1.19 |
| `SupportPodPidsLimit` | `true` | GA | 1.20 | - |
| `TaintBasedEvictions` | `false` | Alpha | 1.6 | 1.12 |
| `TaintBasedEvictions` | `true` | Beta | 1.13 | 1.17 |
| `TaintBasedEvictions` | `true` | GA | 1.18 | - |
| `TaintNodesByCondition` | `false` | Alpha | 1.8 | 1.11 |
| `TaintNodesByCondition` | `true` | Beta | 1.12 | 1.16 |
| `TaintNodesByCondition` | `true` | GA | 1.17 | - |
| `TokenRequest` | `false` | Alpha | 1.10 | 1.11 |
| `TokenRequest` | `true` | Beta | 1.12 | 1.19 |
| `TokenRequest` | `true` | GA | 1.20 | - |
| `TokenRequestProjection` | `false` | Alpha | 1.11 | 1.11 |
| `TokenRequestProjection` | `true` | Beta | 1.12 | 1.19 |
| `TokenRequestProjection` | `true` | GA | 1.20 | - |
| `VolumeSnapshotDataSource` | `false` | Alpha | 1.12 | 1.16 |
| `VolumeSnapshotDataSource` | `true` | Beta | 1.17 | 1.19 |
| `VolumeSnapshotDataSource` | `true` | GA | 1.20 | - |
| `VolumePVCDataSource` | `false` | Alpha | 1.15 | 1.15 |
| `VolumePVCDataSource` | `true` | Beta | 1.16 | 1.17 |
| `VolumePVCDataSource` | `true` | GA | 1.18 | - |
@ -369,6 +406,7 @@ Each feature gate is designed for enabling/disabling a specific feature:
- `APIListChunking`: Enable the API clients to retrieve (`LIST` or `GET`) resources from API server in chunks.
- `APIPriorityAndFairness`: Enable managing request concurrency with prioritization and fairness at each server. (Renamed from `RequestManagement`)
- `APIResponseCompression`: Compress the API responses for `LIST` or `GET` requests.
- `APIServerIdentity`: Assign each kube-apiserver an ID in a cluster.
- `AppArmor`: Enable AppArmor based mandatory access control on Linux nodes when using Docker.
See [AppArmor Tutorial](/docs/tutorials/clusters/apparmor/) for more details.
- `AttachVolumeLimit`: Enable volume plugins to report limits on number of volumes
@ -387,6 +425,7 @@ Each feature gate is designed for enabling/disabling a specific feature:
Check [Bound Service Account Tokens](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/1205-bound-service-account-tokens/README.md)
for more details.
- `ConfigurableFSGroupPolicy`: Allows user to configure volume permission change policy for fsGroups when mounting a volume in a Pod. See [Configure volume permission and ownership change policy for Pods](/docs/tasks/configure-pod-container/security-context/#configure-volume-permission-and-ownership-change-policy-for-pods) for more details.
- `CronJobControllerV2`: Use an alternative implementation of the {{< glossary_tooltip text="CronJob" term_id="cronjob" >}} controller. Otherwise, version 1 of the same controller is selected. The version 2 controller provides experimental performance improvements.
- `CPUManager`: Enable container level CPU affinity support, see [CPU Management Policies](/docs/tasks/administer-cluster/cpu-management-policies/).
- `CRIContainerLogRotation`: Enable container log rotation for cri container runtime.
- `CSIBlockVolume`: Enable external CSI volume drivers to support block storage. See the [`csi` raw block volume support](/docs/concepts/storage/volumes/#csi-raw-block-volume-support) documentation for more details.
@ -409,6 +448,7 @@ Each feature gate is designed for enabling/disabling a specific feature:
- `CSIPersistentVolume`: Enable discovering and mounting volumes provisioned through a
[CSI (Container Storage Interface)](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/storage/container-storage-interface.md)
compatible volume plugin.
- `CSIServiceAccountToken`: Enable CSI drivers to receive the service account token of the pods on whose behalf they mount volumes. See [Token Requests](https://kubernetes-csi.github.io/docs/token-requests.html).
- `CSIStorageCapacity`: Enables CSI drivers to publish storage capacity information and the Kubernetes scheduler to use that information when scheduling pods. See [Storage Capacity](/docs/concepts/storage/storage-capacity/).
Check the [`csi` volume type](/docs/concepts/storage/volumes/#csi) documentation for more details.
- `CSIVolumeFSGroupPolicy`: Allows CSIDrivers to use the `fsGroupPolicy` field. This field controls whether volumes created by a CSIDriver support volume ownership and permission modifications when these volumes are mounted.
@ -425,11 +465,12 @@ Each feature gate is designed for enabling/disabling a specific feature:
- `CustomResourceWebhookConversion`: Enable webhook-based conversion
on resources created from [CustomResourceDefinition](/docs/concepts/extend-kubernetes/api-extension/custom-resources/).
troubleshoot a running Pod.
- `DisableAcceleratorUsageMetrics`: [Disable accelerator metrics collected by the kubelet](/docs/concepts/cluster-administration/system-metrics/).
- `DisableAcceleratorUsageMetrics`: [Disable accelerator metrics collected by the kubelet](/docs/concepts/cluster-administration/system-metrics/#disable-accelerator-metrics).
- `DevicePlugins`: Enable the [device-plugins](/docs/concepts/cluster-administration/device-plugins/)
based resource provisioning on nodes.
- `DefaultPodTopologySpread`: Enables the use of `PodTopologySpread` scheduling plugin to do
[default spreading](/docs/concepts/workloads/pods/pod-topology-spread-constraints/#internal-default-constraints).
- `DownwardAPIHugePages`: Enables usage of hugepages in downward API.
- `DryRun`: Enable server-side [dry run](/docs/reference/using-api/api-concepts/#dry-run) requests
so that validation, merging, and mutation can be tested without committing.
- `DynamicAuditing`(*deprecated*): Used to enable dynamic auditing before v1.19.
@ -442,6 +483,7 @@ Each feature gate is designed for enabling/disabling a specific feature:
- `EphemeralContainers`: Enable the ability to add {{< glossary_tooltip text="ephemeral containers"
term_id="ephemeral-container" >}} to running pods.
- `EvenPodsSpread`: Enable pods to be scheduled evenly across topology domains. See [Pod Topology Spread Constraints](/docs/concepts/workloads/pods/pod-topology-spread-constraints/).
- `ExecProbeTimeout`: Ensure kubelet respects exec probe timeouts. This feature gate exists in case any of your existing workloads depend on a now-corrected fault where Kubernetes ignored exec probe timeouts. See [readiness probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes).
- `ExpandInUsePersistentVolumes`: Enable expanding in-use PVCs. See [Resizing an in-use PersistentVolumeClaim](/docs/concepts/storage/persistent-volumes/#resizing-an-in-use-persistentvolumeclaim).
- `ExpandPersistentVolumes`: Enable the expanding of persistent volumes. See [Expanding Persistent Volumes Claims](/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims).
- `ExperimentalCriticalPodAnnotation`: Enable annotating specific pods as *critical* so that their [scheduling is guaranteed](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/).
@ -453,6 +495,9 @@ Each feature gate is designed for enabling/disabling a specific feature:
if user namespace remapping is enabled in the Docker daemon.
- `EndpointSlice`: Enables Endpoint Slices for more scalable and extensible
network endpoints. See [Enabling Endpoint Slices](/docs/tasks/administer-cluster/enabling-endpointslices/).
- `EndpointSliceNodeName`: Enables EndpointSlice `nodeName` field.
- `EndpointSliceTerminating`: Enables EndpointSlice `terminating` and `serving`
condition fields.
- `EndpointSliceProxying`: When this feature gate is enabled, kube-proxy running
on Linux will use EndpointSlices as the primary data source instead of
Endpoints, enabling scalability and performance improvements. See
@ -463,6 +508,7 @@ Each feature gate is designed for enabling/disabling a specific feature:
[Enabling Endpoint Slices](/docs/tasks/administer-cluster/enabling-endpointslices/).
- `GCERegionalPersistentDisk`: Enable the regional PD feature on GCE.
- `GenericEphemeralVolume`: Enables ephemeral, inline volumes that support all features of normal volumes (can be provided by third-party storage vendors, storage capacity tracking, restore from snapshot, etc.). See [Ephemeral Volumes](/docs/concepts/storage/ephemeral-volumes/).
- `GracefulNodeShutdown`: Enables support for graceful shutdown in kubelet. During a system shutdown, kubelet will attempt to detect the shutdown event and gracefully terminate pods running on the node. See [Graceful Node Shutdown](/docs/concepts/architecture/nodes/#graceful-node-shutdown) for more details.
- `HugePages`: Enable the allocation and consumption of pre-allocated [huge pages](/docs/tasks/manage-hugepages/scheduling-hugepages/).
- `HugePageStorageMediumSize`: Enable support for multiple sizes pre-allocated [huge pages](/docs/tasks/manage-hugepages/scheduling-hugepages/).
- `HyperVContainer`: Enable [Hyper-V isolation](https://docs.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/hyperv-container) for Windows containers.
@ -470,6 +516,7 @@ Each feature gate is designed for enabling/disabling a specific feature:
- `ImmutableEphemeralVolumes`: Allows for marking individual Secrets and ConfigMaps as immutable for better safety and performance.
- `KubeletConfigFile`: Enable loading kubelet configuration from a file specified using a config file.
See [setting kubelet parameters via a config file](/docs/tasks/administer-cluster/kubelet-config-file/) for more details.
- `KubeletCredentialProviders`: Enable kubelet exec credential providers for image pull credentials.
- `KubeletPluginsWatcher`: Enable probe-based plugin watcher utility to enable kubelet
to discover plugins such as [CSI volume drivers](/docs/concepts/storage/volumes/#csi).
- `KubeletPodResources`: Enable the kubelet's pod resources grpc endpoint.
@ -477,6 +524,7 @@ Each feature gate is designed for enabling/disabling a specific feature:
- `LegacyNodeRoleBehavior`: When disabled, legacy behavior in service load balancers and node disruption will ignore the `node-role.kubernetes.io/master` label in favor of the feature-specific labels provided by `NodeDisruptionExclusion` and `ServiceNodeExclusion`.
- `LocalStorageCapacityIsolation`: Enable the consumption of [local ephemeral storage](/docs/concepts/configuration/manage-resources-containers/) and also the `sizeLimit` property of an [emptyDir volume](/docs/concepts/storage/volumes/#emptydir).
- `LocalStorageCapacityIsolationFSQuotaMonitoring`: When `LocalStorageCapacityIsolation` is enabled for [local ephemeral storage](/docs/concepts/configuration/manage-resources-containers/) and the backing filesystem for [emptyDir volumes](/docs/concepts/storage/volumes/#emptydir) supports project quotas and they are enabled, use project quotas to monitor [emptyDir volume](/docs/concepts/storage/volumes/#emptydir) storage consumption rather than filesystem walk for better performance and accuracy.
- `MixedProtocolLBService`: Enable using different protocols in the same LoadBalancer type Service instance.
- `MountContainers`: Enable using utility containers on host as the volume mounter.
- `MountPropagation`: Enable sharing volume mounted by one container to other containers or pods.
For more details, please see [mount propagation](/docs/concepts/storage/volumes/#mount-propagation).
@ -504,6 +552,8 @@ Each feature gate is designed for enabling/disabling a specific feature:
the input Pod's cpu and memory limits. The intent is to break ties between
nodes with same scores.
- `ResourceQuotaScopeSelectors`: Enable resource quota scope selectors.
- `RootCAConfigMap`: Configure the kube-controller-manager to publish a {{< glossary_tooltip text="ConfigMap" term_id="configmap" >}} named `kube-root-ca.crt` to every namespace. This ConfigMap contains a CA bundle used for verifying connections to the kube-apiserver.
See [Bound Service Account Tokens](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/1205-bound-service-account-tokens/README.md) for more details.
- `RotateKubeletClientCertificate`: Enable the rotation of the client TLS certificate on the kubelet.
See [kubelet configuration](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration) for more details.
- `RotateKubeletServerCertificate`: Enable the rotation of the server TLS certificate on the kubelet.
@ -515,10 +565,12 @@ Each feature gate is designed for enabling/disabling a specific feature:
- `ServerSideApply`: Enables the [Server Side Apply (SSA)](/docs/reference/using-api/server-side-apply/) path at the API Server.
- `ServiceAccountIssuerDiscovery`: Enable OIDC discovery endpoints (issuer and JWKS URLs) for the service account issuer in the API server. See [Configure Service Accounts for Pods](/docs/tasks/configure-pod-container/configure-service-account/#service-account-issuer-discovery) for more details.
- `ServiceAppProtocol`: Enables the `AppProtocol` field on Services and Endpoints.
- `ServiceLBNodePortControl`: Enables the `spec.allocateLoadBalancerNodePorts` field on Services.
- `ServiceLoadBalancerFinalizer`: Enable finalizer protection for Service load balancers.
- `ServiceNodeExclusion`: Enable the exclusion of nodes from load balancers created by a cloud provider.
A node is eligible for exclusion if it is labelled with the `alpha.service-controller.kubernetes.io/exclude-balancer` key or with `node.kubernetes.io/exclude-from-external-load-balancers`.
- `ServiceTopology`: Enable service to route traffic based upon the Node topology of the cluster. See [ServiceTopology](/docs/concepts/services-networking/service-topology/) for more details.
- `SizeMemoryBackedVolumes`: Enables kubelet support to size memory backed volumes. See [volumes](/docs/concepts/storage/volumes/) for more details.
- `SetHostnameAsFQDN`: Enable the ability to set the Fully Qualified Domain Name (FQDN) as the hostname of a Pod. See [Pod's `setHostnameAsFQDN` field](/docs/concepts/services-networking/dns-pod-service/#pod-sethostnameasfqdn-field).
- `StartupProbe`: Enable the [startup](/docs/concepts/workloads/pods/pod-lifecycle/#when-should-you-use-a-startup-probe) probe in the kubelet.
- `StorageObjectInUseProtection`: Postpone the deletion of PersistentVolume or
@ -530,6 +582,7 @@ Each feature gate is designed for enabling/disabling a specific feature:
- `SupportIPVSProxyMode`: Enable providing in-cluster service load balancing using IPVS.
See [service proxies](/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies) for more details.
- `SupportPodPidsLimit`: Enable support for limiting PIDs in Pods.
- `SupportNodePidsLimit`: Enable support for limiting PIDs on the Node. The parameter `pid=<number>` can be specified in the `--system-reserved` and `--kube-reserved` options to ensure that the specified number of process IDs is reserved for the system as a whole and for Kubernetes system daemons respectively; see the sketch after this list.
- `Sysctls`: Enable support for namespaced kernel parameters (sysctls) that can be set for each pod.
See [sysctls](/docs/tasks/administer-cluster/sysctl-cluster/) for more details.
- `TaintBasedEvictions`: Enable evicting pods from nodes based on taints on nodes and tolerations on Pods.
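As a sketch of the node-level PID reservation mentioned for `SupportNodePidsLimit` above (the CPU, memory, and PID values are illustrative):
```shell
# Reserve 1000 process IDs for OS daemons and 1000 for Kubernetes system
# daemons; the reservation values here are illustrative.
kubelet --feature-gates=SupportNodePidsLimit=true \
        --system-reserved=cpu=500m,memory=1Gi,pid=1000 \
        --kube-reserved=cpu=500m,memory=1Gi,pid=1000
```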

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -579,7 +579,7 @@ ConfigurableFSGroupPolicy=true|false (ALPHA - default=false)<br/>
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)<br/>
DefaultPodTopologySpread=true|false (ALPHA - default=false)<br/>
DevicePlugins=true|false (BETA - default=true)<br/>
DisableAcceleratorUsageMetrics=true|false (ALPHA - default=false)<br/>
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)<br/>
DynamicKubeletConfig=true|false (BETA - default=true)<br/>
EndpointSlice=true|false (BETA - default=true)<br/>
EndpointSliceProxying=true|false (BETA - default=true)<br/>
@ -617,7 +617,6 @@ ServiceAppProtocol=true|false (BETA - default=true)<br/>
ServiceNodeExclusion=true|false (BETA - default=true)<br/>
ServiceTopology=true|false (ALPHA - default=false)<br/>
SetHostnameAsFQDN=true|false (ALPHA - default=false)<br/>
StartupProbe=true|false (BETA - default=true)<br/>
StorageVersionHash=true|false (BETA - default=true)<br/>
SupportNodePidsLimit=true|false (BETA - default=true)<br/>
SupportPodPidsLimit=true|false (BETA - default=true)<br/>

View File

@ -206,6 +206,13 @@ kubectl [flags]
<td></td><td style="line-height: 130%; word-wrap: break-word;">If present, the namespace scope for this CLI request</td>
</tr>
<tr>
<td colspan="2">--one-output</td>
</tr>
<tr>
<td></td><td style="line-height: 130%; word-wrap: break-word;">If true, only write logs to their native severity level (vs also writing to each lower severity level)</td>
</tr>
<tr>
<td colspan="2">--password string</td>
</tr>
@ -325,7 +332,6 @@ kubectl [flags]
## {{% heading "seealso" %}}
* [kubectl alpha](/docs/reference/generated/kubectl/kubectl-commands#alpha) - Commands for features in alpha
* [kubectl annotate](/docs/reference/generated/kubectl/kubectl-commands#annotate) - Update the annotations on a resource
* [kubectl api-resources](/docs/reference/generated/kubectl/kubectl-commands#api-resources) - Print the supported API resources on the server
* [kubectl api-versions](/docs/reference/generated/kubectl/kubectl-commands#api-versions) - Print the supported API versions on the server, in the form of "group/version"
@ -337,10 +343,10 @@ kubectl [flags]
* [kubectl cluster-info](/docs/reference/generated/kubectl/kubectl-commands#cluster-info) - Display cluster info
* [kubectl completion](/docs/reference/generated/kubectl/kubectl-commands#completion) - Output shell completion code for the specified shell (bash or zsh)
* [kubectl config](/docs/reference/generated/kubectl/kubectl-commands#config) - Modify kubeconfig files
* [kubectl convert](/docs/reference/generated/kubectl/kubectl-commands#convert) - Convert config files between different API versions
* [kubectl cordon](/docs/reference/generated/kubectl/kubectl-commands#cordon) - Mark node as unschedulable
* [kubectl cp](/docs/reference/generated/kubectl/kubectl-commands#cp) - Copy files and directories to and from containers.
* [kubectl create](/docs/reference/generated/kubectl/kubectl-commands#create) - Create a resource from a file or from stdin.
* [kubectl debug](/docs/reference/generated/kubectl/kubectl-commands#debug) - Create debugging sessions for troubleshooting workloads and nodes
* [kubectl delete](/docs/reference/generated/kubectl/kubectl-commands#delete) - Delete resources by filenames, stdin, resources and names, or by resources and label selector
* [kubectl describe](/docs/reference/generated/kubectl/kubectl-commands#describe) - Show details of a specific resource or group of resources
* [kubectl diff](/docs/reference/generated/kubectl/kubectl-commands#diff) - Diff live version against would-be applied version
@ -354,7 +360,7 @@ kubectl [flags]
* [kubectl label](/docs/reference/generated/kubectl/kubectl-commands#label) - Update the labels on a resource
* [kubectl logs](/docs/reference/generated/kubectl/kubectl-commands#logs) - Print the logs for a container in a pod
* [kubectl options](/docs/reference/generated/kubectl/kubectl-commands#options) - Print the list of flags inherited by all commands
* [kubectl patch](/docs/reference/generated/kubectl/kubectl-commands#patch) - Update field(s) of a resource using strategic merge patch
* [kubectl patch](/docs/reference/generated/kubectl/kubectl-commands#patch) - Update field(s) of a resource
* [kubectl plugin](/docs/reference/generated/kubectl/kubectl-commands#plugin) - Provides utilities for interacting with plugins.
* [kubectl port-forward](/docs/reference/generated/kubectl/kubectl-commands#port-forward) - Forward one or more local ports to a pod
* [kubectl proxy](/docs/reference/generated/kubectl/kubectl-commands#proxy) - Run a proxy to the Kubernetes API server

View File

@ -1,6 +1,6 @@
---
title: v1.19
title: v1.20
weight: 50
---
[Kubernetes API v1.19](/docs/reference/generated/kubernetes-api/v1.19/)
[Kubernetes API v1.20](/docs/reference/generated/kubernetes-api/v1.20/)

View File

@ -13,8 +13,8 @@ kubeadm alpha kubeconfig user [flags]
### Examples
```
# Output a kubeconfig file for an additional user named foo
kubeadm alpha kubeconfig user --client-name=foo
# Output a kubeconfig file for an additional user named foo using a kubeadm config file bar
kubeadm alpha kubeconfig user --client-name=foo --config=bar
```
### Options
@ -26,27 +26,6 @@ kubeadm alpha kubeconfig user [flags]
</colgroup>
<tbody>
<tr>
<td colspan="2">--apiserver-advertise-address string</td>
</tr>
<tr>
<td></td><td style="line-height: 130%; word-wrap: break-word;">The IP address the API server is accessible on</td>
</tr>
<tr>
<td colspan="2">--apiserver-bind-port int32&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Default: 6443</td>
</tr>
<tr>
<td></td><td style="line-height: 130%; word-wrap: break-word;">The port the API server is accessible on</td>
</tr>
<tr>
<td colspan="2">--cert-dir string&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Default: "/etc/kubernetes/pki"</td>
</tr>
<tr>
<td></td><td style="line-height: 130%; word-wrap: break-word;">The path where certificates are stored</td>
</tr>
<tr>
<td colspan="2">--client-name string</td>
</tr>
@ -54,6 +33,13 @@ kubeadm alpha kubeconfig user [flags]
<td></td><td style="line-height: 130%; word-wrap: break-word;">The name of user. It will be used as the CN if client certificates are created</td>
</tr>
<tr>
<td colspan="2">--config string</td>
</tr>
<tr>
<td></td><td style="line-height: 130%; word-wrap: break-word;">Path to a kubeadm configuration file.</td>
</tr>
<tr>
<td colspan="2">-h, --help</td>
</tr>

View File

@ -11,7 +11,7 @@ generate and print one for you.
```
kubeadm alpha certs certificate-key [flags]
kubeadm certs certificate-key [flags]
```
### Options

View File

@ -5,7 +5,7 @@
Checks expiration for the certificates in the local PKI managed by kubeadm.
```
kubeadm alpha certs check-expiration [flags]
kubeadm certs check-expiration [flags]
```
### Options

View File

@ -9,7 +9,7 @@ This command is designed for use in [Kubeadm External CA Mode](https://kubernete
The PEM encoded signed certificates should then be saved alongside the key files, using ".crt" as the file extension, or in the case of kubeconfig files, the PEM encoded signed certificate should be base64 encoded and added to the kubeconfig file in the "users &gt; user &gt; client-certificate-data" field.
```
kubeadm alpha certs generate-csr [flags]
kubeadm certs generate-csr [flags]
```
### Examples

View File

@ -5,7 +5,7 @@
This command is not meant to be run on its own. See list of available subcommands.
```
kubeadm alpha certs renew [flags]
kubeadm certs renew [flags]
```
### Options

View File

@ -11,7 +11,7 @@ Renewal by default tries to use the certificate authority in the local PKI manag
After renewal, in order to make the changes effective, you must restart the control-plane components and, if the file is used elsewhere, re-distribute the renewed certificate.
```
kubeadm alpha certs renew admin.conf [flags]
kubeadm certs renew admin.conf [flags]
```
### Options

View File

@ -5,7 +5,7 @@
Renew all known certificates necessary to run the control plane. Renewals are run unconditionally, regardless of expiration date. Renewals can also be run individually for more control.
```
kubeadm alpha certs renew all [flags]
kubeadm certs renew all [flags]
```
### Options

View File

@ -11,7 +11,7 @@ Renewal by default tries to use the certificate authority in the local PKI manag
After renewal, in order to make the changes effective, you must restart the control-plane components and, if the file is used elsewhere, re-distribute the renewed certificate.
```
kubeadm alpha certs renew apiserver-etcd-client [flags]
kubeadm certs renew apiserver-etcd-client [flags]
```
### Options

View File

@ -11,7 +11,7 @@ Renewal by default tries to use the certificate authority in the local PKI manag
After renewal, in order to make the changes effective, you must restart the control-plane components and, if the file is used elsewhere, re-distribute the renewed certificate.
```
kubeadm alpha certs renew apiserver-kubelet-client [flags]
kubeadm certs renew apiserver-kubelet-client [flags]
```
### Options

View File

@ -11,7 +11,7 @@ Renewal by default tries to use the certificate authority in the local PKI manag
After renewal, in order to make the changes effective, you must restart the control-plane components and, if the file is used elsewhere, re-distribute the renewed certificate.
```
kubeadm alpha certs renew apiserver [flags]
kubeadm certs renew apiserver [flags]
```
### Options

View File

@ -11,7 +11,7 @@ Renewal by default tries to use the certificate authority in the local PKI manag
After renewal, in order to make the changes effective, you must restart the control-plane components and, if the file is used elsewhere, re-distribute the renewed certificate.
```
kubeadm alpha certs renew controller-manager.conf [flags]
kubeadm certs renew controller-manager.conf [flags]
```
### Options

View File

@ -11,7 +11,7 @@ Renewal by default tries to use the certificate authority in the local PKI manag
After renewal, in order to make the changes effective, you must restart the control-plane components and, if the file is used elsewhere, re-distribute the renewed certificate.
```
kubeadm alpha certs renew etcd-healthcheck-client [flags]
kubeadm certs renew etcd-healthcheck-client [flags]
```
### Options

View File

@ -11,7 +11,7 @@ Renewal by default tries to use the certificate authority in the local PKI manag
After renewal, in order to make the changes effective, you must restart the control-plane components and, if the file is used elsewhere, re-distribute the renewed certificate.
```
kubeadm alpha certs renew etcd-peer [flags]
kubeadm certs renew etcd-peer [flags]
```
### Options

View File

@ -11,7 +11,7 @@ Renewal by default tries to use the certificate authority in the local PKI manag
After renewal, in order to make the changes effective, you must restart the control-plane components and, if the file is used elsewhere, re-distribute the renewed certificate.
```
kubeadm alpha certs renew etcd-server [flags]
kubeadm certs renew etcd-server [flags]
```
### Options

View File

@ -11,7 +11,7 @@ Renewal by default tries to use the certificate authority in the local PKI manag
After renewal, in order to make the changes effective, you must restart the control-plane components and, if the file is used elsewhere, re-distribute the renewed certificate.
```
kubeadm alpha certs renew front-proxy-client [flags]
kubeadm certs renew front-proxy-client [flags]
```
### Options

View File

@ -11,7 +11,7 @@ Renewal by default tries to use the certificate authority in the local PKI manag
After renewal, in order to make the changes effective, you must restart the control-plane components and, if the file is used elsewhere, re-distribute the renewed certificate.
```
kubeadm alpha certs renew scheduler.conf [flags]
kubeadm certs renew scheduler.conf [flags]
```
### Options

View File

@ -77,6 +77,13 @@ kubeadm init phase control-plane all [flags]
<td></td><td style="line-height: 130%; word-wrap: break-word;">A set of extra flags to pass to the Controller Manager or override default ones in form of &lt;flagname&gt;=&lt;value&gt;</td>
</tr>
<tr>
<td colspan="2">--experimental-patches string</td>
</tr>
<tr>
<td></td><td style="line-height: 130%; word-wrap: break-word;">Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.</td>
</tr>
<tr>
<td colspan="2">--feature-gates string</td>
</tr>

View File

@ -59,6 +59,13 @@ kubeadm init phase control-plane apiserver [flags]
<td></td><td style="line-height: 130%; word-wrap: break-word;">Specify a stable IP address or DNS name for the control plane.</td>
</tr>
<tr>
<td colspan="2">--experimental-patches string</td>
</tr>
<tr>
<td></td><td style="line-height: 130%; word-wrap: break-word;">Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.</td>
</tr>
<tr>
<td colspan="2">--feature-gates string</td>
</tr>

View File

@ -38,6 +38,13 @@ kubeadm init phase control-plane controller-manager [flags]
<td></td><td style="line-height: 130%; word-wrap: break-word;">A set of extra flags to pass to the Controller Manager or override default ones in form of &lt;flagname&gt;=&lt;value&gt;</td>
</tr>
<tr>
<td colspan="2">--experimental-patches string</td>
</tr>
<tr>
<td></td><td style="line-height: 130%; word-wrap: break-word;">Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.</td>
</tr>
<tr>
<td colspan="2">-h, --help</td>
</tr>

View File

@ -31,6 +31,13 @@ kubeadm init phase control-plane scheduler [flags]
<td></td><td style="line-height: 130%; word-wrap: break-word;">Path to a kubeadm configuration file.</td>
</tr>
<tr>
<td colspan="2">--experimental-patches string</td>
</tr>
<tr>
<td></td><td style="line-height: 130%; word-wrap: break-word;">Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.</td>
</tr>
<tr>
<td colspan="2">-h, --help</td>
</tr>

View File

@ -38,6 +38,13 @@ kubeadm init phase upload-certs [flags]
<td></td><td style="line-height: 130%; word-wrap: break-word;">help for upload-certs</td>
</tr>
<tr>
<td colspan="2">--kubeconfig string&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Default: "/etc/kubernetes/admin.conf"</td>
</tr>
<tr>
<td></td><td style="line-height: 130%; word-wrap: break-word;">The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.</td>
</tr>
<tr>
<td colspan="2">--skip-certificate-key-print</td>
</tr>

View File

@ -1,7 +1,4 @@
---
reviewers:
- luxas
- jbeda
title: kubeadm alpha
content_type: concept
weight: 90
@ -12,62 +9,6 @@ weight: 90
from the community. Please try it out and give us feedback!
{{< /caution >}}
## kubeadm alpha certs {#cmd-certs}
A collection of operations for operating Kubernetes certificates.
{{< tabs name="tab-certs" >}}
{{< tab name="overview" include="generated/kubeadm_alpha_certs.md" />}}
{{< /tabs >}}
## kubeadm alpha certs renew {#cmd-certs-renew}
You can renew all Kubernetes certificates using the `all` subcommand or renew them selectively.
For more details about certificate expiration and renewal see the [certificate management documentation](/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/).
{{< tabs name="tab-certs-renew" >}}
{{< tab name="renew" include="generated/kubeadm_alpha_certs_renew.md" />}}
{{< tab name="all" include="generated/kubeadm_alpha_certs_renew_all.md" />}}
{{< tab name="admin.conf" include="generated/kubeadm_alpha_certs_renew_admin.conf.md" />}}
{{< tab name="apiserver-etcd-client" include="generated/kubeadm_alpha_certs_renew_apiserver-etcd-client.md" />}}
{{< tab name="apiserver-kubelet-client" include="generated/kubeadm_alpha_certs_renew_apiserver-kubelet-client.md" />}}
{{< tab name="apiserver" include="generated/kubeadm_alpha_certs_renew_apiserver.md" />}}
{{< tab name="controller-manager.conf" include="generated/kubeadm_alpha_certs_renew_controller-manager.conf.md" />}}
{{< tab name="etcd-healthcheck-client" include="generated/kubeadm_alpha_certs_renew_etcd-healthcheck-client.md" />}}
{{< tab name="etcd-peer" include="generated/kubeadm_alpha_certs_renew_etcd-peer.md" />}}
{{< tab name="etcd-server" include="generated/kubeadm_alpha_certs_renew_etcd-server.md" />}}
{{< tab name="front-proxy-client" include="generated/kubeadm_alpha_certs_renew_front-proxy-client.md" />}}
{{< tab name="scheduler.conf" include="generated/kubeadm_alpha_certs_renew_scheduler.conf.md" />}}
{{< /tabs >}}
## kubeadm alpha certs certificate-key {#cmd-certs-certificate-key}
This command can be used to generate a new control-plane certificate key.
The key can be passed as `--certificate-key` to `kubeadm init` and `kubeadm join`
to enable the automatic copy of certificates when joining additional control-plane nodes.
{{< tabs name="tab-certs-certificate-key" >}}
{{< tab name="certificate-key" include="generated/kubeadm_alpha_certs_certificate-key.md" />}}
{{< /tabs >}}
## kubeadm alpha certs generate-csr {#cmd-certs-generate-csr}
This command can be used to generate certificate signing requests (CSRs) which
can be submitted to a certificate authority (CA) for signing.
{{< tabs name="tab-certs-generate-csr" >}}
{{< tab name="certificate-generate-csr" include="generated/kubeadm_alpha_certs_generate-csr.md" />}}
{{< /tabs >}}
## kubeadm alpha certs check-expiration {#cmd-certs-check-expiration}
This command checks expiration for the certificates in the local PKI managed by kubeadm.
For more details about certificate expiration and renewal see the [certificate management documentation](/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/).
{{< tabs name="tab-certs-check-expiration" >}}
{{< tab name="check-expiration" include="generated/kubeadm_alpha_certs_check-expiration.md" />}}
{{< /tabs >}}
## kubeadm alpha kubeconfig user {#cmd-phase-kubeconfig}
The `user` subcommand can be used for the creation of kubeconfig files for additional users.

View File

@ -0,0 +1,73 @@
---
title: kubeadm certs
content_type: concept
weight: 90
---
`kubeadm certs` provides utilities for managing certificates.
For more details on how these commands can be used, see
[Certificate Management with kubeadm](/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/).
## kubeadm certs {#cmd-certs}
A collection of operations for operating Kubernetes certificates.
{{< tabs name="tab-certs" >}}
{{< tab name="overview" include="generated/kubeadm_certs.md" />}}
{{< /tabs >}}
## kubeadm certs renew {#cmd-certs-renew}
You can renew all Kubernetes certificates using the `all` subcommand or renew them selectively.
For more details see [Manual certificate renewal](/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/#manual-certificate-renewal).
{{< tabs name="tab-certs-renew" >}}
{{< tab name="renew" include="generated/kubeadm_certs_renew.md" />}}
{{< tab name="all" include="generated/kubeadm_certs_renew_all.md" />}}
{{< tab name="admin.conf" include="generated/kubeadm_certs_renew_admin.conf.md" />}}
{{< tab name="apiserver-etcd-client" include="generated/kubeadm_certs_renew_apiserver-etcd-client.md" />}}
{{< tab name="apiserver-kubelet-client" include="generated/kubeadm_certs_renew_apiserver-kubelet-client.md" />}}
{{< tab name="apiserver" include="generated/kubeadm_certs_renew_apiserver.md" />}}
{{< tab name="controller-manager.conf" include="generated/kubeadm_certs_renew_controller-manager.conf.md" />}}
{{< tab name="etcd-healthcheck-client" include="generated/kubeadm_certs_renew_etcd-healthcheck-client.md" />}}
{{< tab name="etcd-peer" include="generated/kubeadm_certs_renew_etcd-peer.md" />}}
{{< tab name="etcd-server" include="generated/kubeadm_certs_renew_etcd-server.md" />}}
{{< tab name="front-proxy-client" include="generated/kubeadm_certs_renew_front-proxy-client.md" />}}
{{< tab name="scheduler.conf" include="generated/kubeadm_certs_renew_scheduler.conf.md" />}}
{{< /tabs >}}
## kubeadm certs certificate-key {#cmd-certs-certificate-key}
This command can be used to generate a new control-plane certificate key.
The key can be passed as `--certificate-key` to [`kubeadm init`](/docs/reference/setup-tools/kubeadm/kubeadm-init)
and [`kubeadm join`](/docs/reference/setup-tools/kubeadm/kubeadm-join)
to enable the automatic copy of certificates when joining additional control-plane nodes.
{{< tabs name="tab-certs-certificate-key" >}}
{{< tab name="certificate-key" include="generated/kubeadm_certs_certificate-key.md" />}}
{{< /tabs >}}
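As a minimal sketch (the other `kubeadm init` flags your cluster needs are omitted here), the key can be captured in a shell variable and passed to `kubeadm init` together with `--upload-certs`:
```shell
# Generate a random certificate key and use it to encrypt the certificates
# that kubeadm uploads for later control-plane joins.
KEY="$(kubeadm certs certificate-key)"
sudo kubeadm init --upload-certs --certificate-key "${KEY}"
```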
## kubeadm certs check-expiration {#cmd-certs-check-expiration}
This command checks expiration for the certificates in the local PKI managed by kubeadm.
For more details see
[Check certificate expiration](/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/#check-certificate-expiration).
{{< tabs name="tab-certs-check-expiration" >}}
{{< tab name="check-expiration" include="generated/kubeadm_certs_check-expiration.md" />}}
{{< /tabs >}}
## kubeadm certs generate-csr {#cmd-certs-generate-csr}
This command can be used to generate keys and CSRs for all control-plane certificates and kubeconfig files.
The user can then sign the CSRs with a CA of their choice.
{{< tabs name="tab-certs-generate-csr" >}}
{{< tab name="generate-csr" include="generated/kubeadm_certs_generate-csr.md" />}}
{{< /tabs >}}
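The following is a sketch rather than an authoritative invocation; the output directories are illustrative, and the `--cert-dir` and `--kubeconfig-dir` flags should be checked against the generated reference above:
```shell
# Write private keys and CSRs for the control-plane certificates and
# kubeconfig files to scratch directories, ready to be signed by your own CA.
sudo kubeadm certs generate-csr --cert-dir /tmp/pki --kubeconfig-dir /tmp/kubeconfig
```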
## {{% heading "whatsnext" %}}
* [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init/) to bootstrap a Kubernetes control-plane node
* [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join/) to connect a node to the cluster
* [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset/) to revert any changes made to this host by `kubeadm init` or `kubeadm join`

View File

@ -178,7 +178,7 @@ If the flag `--certificate-key` is not passed to `kubeadm init` and
The following command can be used to generate a new key on demand:
```shell
kubeadm alpha certs certificate-key
kubeadm certs certificate-key
```
### Certificate management with kubeadm
@ -246,7 +246,7 @@ or use a DNS name or an address of a load balancer.
nodes. The key can be generated using:
```shell
kubeadm alpha certs certificate-key
kubeadm certs certificate-key
```
Once the cluster is up, you can grab the admin credentials from the control-plane node

View File

@ -166,8 +166,8 @@ sudo systemctl restart containerd
```powershell
# (Install containerd)
# download containerd
cmd /c curl -OL https://github.com/containerd/containerd/releases/download/v1.4.0-beta.2/containerd-1.4.0-beta.2-windows-amd64.tar.gz
cmd /c tar xvf .\containerd-1.4.0-beta.2-windows-amd64.tar.gz
cmd /c curl -OL https://github.com/containerd/containerd/releases/download/v1.4.1/containerd-1.4.1-windows-amd64.tar.gz
cmd /c tar xvf .\containerd-1.4.1-windows-amd64.tar.gz
```
```powershell

View File

@ -151,58 +151,9 @@ have container image support for this architecture.
`kubeadm init` first runs a series of prechecks to ensure that the machine
is ready to run Kubernetes. These prechecks expose warnings and exit on errors. `kubeadm init`
then downloads and installs the cluster control plane components. This may take several minutes.
The output should look like:
After it finishes you should see:
```none
[init] Using Kubernetes version: vX.Y.Z
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [kubeadm-cp localhost] and IPs [10.138.0.4 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [kubeadm-cp localhost] and IPs [10.138.0.4 127.0.0.1 ::1]
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubeadm-cp kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.138.0.4]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 31.501735 seconds
[uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-X.Y" in namespace kube-system with the configuration for the kubelets in the cluster
[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "kubeadm-cp" as an annotation
[mark-control-plane] Marking the node kubeadm-cp as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node kubeadm-cp as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: <token>
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstraptoken] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstraptoken] creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:

View File

@ -133,10 +133,10 @@ option. Your cluster requirements may need a different configuration.
...
You can now join any number of control-plane node by running the following command on each as a root:
kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 --control-plane --certificate-key f8902e114ef118304e561c3ecd4d0b543adc226b7a07f675f56564185ffe0c07
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use kubeadm init phase upload-certs to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866
```
@ -155,7 +155,7 @@ option. Your cluster requirements may need a different configuration.
To generate such a key you can use the following command:
```sh
kubeadm alpha certs certificate-key
kubeadm certs certificate-key
```
{{< note >}}

View File

@ -11,8 +11,6 @@ weight: 65
Windows applications constitute a large portion of the services and applications that run in many organizations. [Windows containers](https://aka.ms/windowscontainers) provide a modern way to encapsulate processes and package dependencies, making it easier to use DevOps practices and follow cloud native patterns for Windows applications. Kubernetes has become the de facto standard container orchestrator, and the release of Kubernetes 1.14 includes production support for scheduling Windows containers on Windows nodes in a Kubernetes cluster, enabling a vast ecosystem of Windows applications to leverage the power of Kubernetes. Organizations with investments in Windows-based applications and Linux-based applications don't have to look for separate orchestrators to manage their workloads, leading to increased operational efficiencies across their deployments, regardless of operating system.
<!-- body -->
## Windows containers in Kubernetes
@ -39,12 +37,10 @@ Refer to the following table for Windows operating system support in Kubernetes.
| Kubernetes version | Windows Server LTSC releases | Windows Server SAC releases |
| --- | --- | --- |
| *Kubernetes v1.14* | Windows Server 2019 | Windows Server ver 1809 |
| *Kubernetes v1.15* | Windows Server 2019 | Windows Server ver 1809 |
| *Kubernetes v1.16* | Windows Server 2019 | Windows Server ver 1809 |
| *Kubernetes v1.17* | Windows Server 2019 | Windows Server ver 1809 |
| *Kubernetes v1.18* | Windows Server 2019 | Windows Server ver 1809, Windows Server ver 1903, Windows Server ver 1909 |
| *Kubernetes v1.19* | Windows Server 2019 | Windows Server ver 1909, Windows Server ver 2004 |
| *Kubernetes v1.20* | Windows Server 2019 | Windows Server ver 1909, Windows Server ver 2004 |
{{< note >}}
Information on the different Windows Server servicing channels including their support models can be found at [Windows Server servicing channels](https://docs.microsoft.com/en-us/windows-server/get-started-19/servicing-channels-19).
@ -59,6 +55,10 @@ The Windows Server Host Operating System is subject to the [Windows Server ](htt
Windows containers with process isolation have strict compatibility rules, [where the host OS version must match the container base image OS version](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility). Once we support Windows containers with Hyper-V isolation in Kubernetes, the limitation and compatibility rules will change.
{{< /note >}}
#### Pause Image
Microsoft maintains a Windows pause infrastructure container at `mcr.microsoft.com/oss/kubernetes/pause:1.4.1`.
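As a sketch, assuming Docker is the container runtime on the node, this image can be pre-pulled so that Pod sandboxes start without waiting on a download:
```shell
# Pre-pull the Windows pause image maintained by Microsoft.
docker pull mcr.microsoft.com/oss/kubernetes/pause:1.4.1
```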
#### Compute
From an API and kubectl perspective, Windows containers behave in much the same way as Linux-based containers. However, there are some notable differences in key functionality which are outlined in the [limitation section](#limitations).
@ -117,23 +117,22 @@ Docker EE-basic 19.03+ is the recommended container runtime for all Windows Serv
##### CRI-ContainerD
{{< feature-state for_k8s_version="v1.19" state="beta" >}}
{{< feature-state for_k8s_version="v1.20" state="stable" >}}
{{< caution >}}
There is a [known limitation](/docs/tasks/configure-pod-container/configure-gmsa/#gmsa-limitations) when using GMSA with ContainerD to access Windows network shares which requires a kernel patch. Check for updates on the [Microsoft Windows Containers issue tracker](https://github.com/microsoft/Windows-Containers/issues/44).
{{< /caution >}}
{{< glossary_tooltip term_id="containerd" text="ContainerD" >}} 1.4.0-beta.2+ can also be used as the container runtime for Windows Kubernetes nodes.
Initial support for ContainerD on Windows was added in Kubernetes v1.18. Progress for ContainerD on Windows can be tracked at [enhancements#1001](https://github.com/kubernetes/enhancements/issues/1001).
{{< glossary_tooltip term_id="containerd" text="ContainerD" >}} 1.4.0+ can also be used as the container runtime for Windows Kubernetes nodes.
Learn how to [install ContainerD on Windows](/docs/setup/production-environment/container-runtimes/#install-containerd).
{{< caution >}}
There is a [known limitation](/docs/tasks/configure-pod-container/configure-gmsa/#gmsa-limitations) when using GMSA with ContainerD to access Windows network shares which requires a kernel patch. Updates to address this limitation are currently available for Windows Server, Version 2004 and will be available for Windows Server 2019 in early 2021. Check for updates on the [Microsoft Windows Containers issue tracker](https://github.com/microsoft/Windows-Containers/issues/44).
{{< /caution >}}
#### Persistent Storage
Kubernetes [volumes](/docs/concepts/storage/volumes/) enable complex applications, with data persistence and Pod volume sharing requirements, to be deployed on Kubernetes. Management of persistent volumes associated with a specific storage back-end or protocol includes actions such as: provisioning/de-provisioning/resizing of volumes, attaching/detaching a volume to/from a Kubernetes node and mounting/dismounting a volume to/from individual containers in a pod that needs to persist data. The code implementing these volume management actions for a specific storage back-end or protocol is shipped in the form of a Kubernetes volume [plugin](/docs/concepts/storage/volumes/#types-of-volumes). The following broad classes of Kubernetes volume plugins are supported on Windows:
##### In-tree Volume Plugins
Code associated with in-tree volume plugins ship as part of the core Kubernetes code base. Deployment of in-tree volume plugins do not require installation of additional scripts or deployment of separate containerized plugin components. These plugins can handle: provisioning/de-provisioning and resizing of volumes in the storage backend, attaching/detaching of volumes to/from a Kubernetes node and mounting/dismounting a volume to/from individual containers in a pod. The following in-tree plugins support Windows nodes:
* [awsElasticBlockStore](/docs/concepts/storage/volumes/#awselasticblockstore)
@ -143,6 +142,7 @@ Code associated with in-tree volume plugins ship as part of the core Kubernetes
* [vsphereVolume](/docs/concepts/storage/volumes/#vspherevolume)
##### FlexVolume Plugins
Code associated with [FlexVolume](/docs/concepts/storage/volumes/#flexVolume) plugins ship as out-of-tree scripts or binaries that need to be deployed directly on the host. FlexVolume plugins handle attaching/detaching of volumes to/from a Kubernetes node and mounting/dismounting a volume to/from individual containers in a pod. Provisioning/De-provisioning of persistent volumes associated with FlexVolume plugins may be handled through an external provisioner that is typically separate from the FlexVolume plugins. The following FlexVolume [plugins](https://github.com/Microsoft/K8s-Storage-Plugins/tree/master/flexvolume/windows), deployed as powershell scripts on the host, support Windows nodes:
* [SMB](https://github.com/microsoft/K8s-Storage-Plugins/tree/master/flexvolume/windows/plugins/microsoft.com~smb.cmd)
@ -150,7 +150,7 @@ Code associated with [FlexVolume](/docs/concepts/storage/volumes/#flexVolume) pl
##### CSI Plugins
{{< feature-state for_k8s_version="v1.16" state="alpha" >}}
{{< feature-state for_k8s_version="v1.19" state="beta" >}}
Code associated with {{< glossary_tooltip text="CSI" term_id="csi" >}} plugins ship as out-of-tree scripts and binaries that are typically distributed as container images and deployed using standard Kubernetes constructs like DaemonSets and StatefulSets. CSI plugins handle a wide range of volume management actions in Kubernetes: provisioning/de-provisioning/resizing of volumes, attaching/detaching of volumes to/from a Kubernetes node and mounting/dismounting a volume to/from individual containers in a pod, backup/restore of persistent data using snapshots and cloning. CSI plugins typically consist of node plugins (that run on each node as a DaemonSet) and controller plugins.
@ -173,6 +173,7 @@ The following service spec types are supported:
* ExternalName
##### Network modes
Windows supports five different networking drivers/modes: L2bridge, L2tunnel, Overlay, Transparent, and NAT. In a heterogeneous cluster with Windows and Linux worker nodes, you need to select a networking solution that is compatible on both Windows and Linux. The following out-of-tree plugins are supported on Windows, with recommendations on when to use each CNI:
| Network Driver | Description | Container Packet Modifications | Network Plugins | Network Plugin Characteristics |
@ -198,6 +199,7 @@ For the node, pod, and service objects, the following network flows are supporte
* Pod -> Node
##### IP address management (IPAM) {#ipam}
The following IPAM options are supported on Windows:
* [Host-local](https://github.com/containernetworking/plugins/tree/master/plugins/ipam/host-local)
@ -218,10 +220,11 @@ On Windows, you can use the following settings to configure Services and load ba
{{< /table >}}
#### IPv4/IPv6 dual-stack
You can enable IPv4/IPv6 dual-stack networking for `l2bridge` networks using the `IPv6DualStack` [feature gate](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/). See [enable IPv4/IPv6 dual stack](/docs/concepts/services-networking/dual-stack#enable-ipv4ipv6-dual-stack) for more details.
{{< note >}}
On Windows, using IPv6 with Kubernetes require Windows Server vNext Insider Preview Build 19603 (or higher).
On Windows, using IPv6 with Kubernetes requires Windows Server, version 2004 (kernel version 10.0.19041.610) or later.
{{< /note >}}
{{< note >}}
@ -234,7 +237,7 @@ Overlay (VXLAN) networks on Windows do not support dual-stack networking today.
Windows is only supported as a worker node in the Kubernetes architecture and component matrix. This means that a Kubernetes cluster must always include Linux master nodes, zero or more Linux worker nodes, and zero or more Windows worker nodes.
#### Compute
#### Compute {#compute-limitations}
##### Resource management and process isolation
@ -246,7 +249,7 @@ Windows has strict compatibility rules, where the host OS version must match the
##### Feature Restrictions
* TerminationGracePeriod: not implemented
* TerminationGracePeriod: requires CRI-containerD
* Single file mapping: to be implemented with CRI-ContainerD
* Termination message: to be implemented with CRI-ContainerD
* Privileged Containers: not currently supported in Windows containers
@ -270,12 +273,13 @@ The behavior of the flags behave differently as described below:
* MemoryPressure Condition is not implemented
* There are no OOM eviction actions taken by the kubelet
* Kubelet running on the Windows node does not have memory restrictions. `--kube-reserved` and `--system-reserved` do not set limits on the kubelet or on processes running on the host. This means the kubelet or a process on the host could cause memory resource starvation outside the node-allocatable and scheduler.
* An additional flag to set the priority of the kubelet process is available on Windows nodes, called `--windows-priorityclass`. This flag allows the kubelet process to get more CPU time slices when compared to other processes running on the Windows host. More information on the allowable values and their meaning is available at [Windows Priority Classes](https://docs.microsoft.com/en-us/windows/win32/procthread/scheduling-priorities#priority-class). In order for the kubelet to always have enough CPU cycles, it is recommended to set this flag to `ABOVE_NORMAL_PRIORITY_CLASS` or above (see the sketch after this list)
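A sketch only, not an authoritative node configuration: the flag is appended to whatever other kubelet arguments the Windows node already uses (for example, in the nssm-based registration shown later on this page).
```shell
# Illustrative fragment: give the kubelet a higher scheduling priority class.
kubelet --windows-priorityclass=ABOVE_NORMAL_PRIORITY_CLASS
```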
#### Storage
Windows has a layered filesystem driver to mount container layers and create a copy filesystem based on NTFS. All file paths in the container are resolved only within the context of that container.
* Volume mounts can only target a directory in the container, and not an individual file
* With Docker, volume mounts can only target a directory in the container, not an individual file. This limitation does not exist with CRI-containerD.
* Volume mounts cannot project files or directories back to the host filesystem
* Read-only filesystems are not supported because write access is always required for the Windows registry and SAM database. However, read-only volumes are supported
* Volume user-masks and permissions are not available. Because the SAM is not shared between the host & container, there's no mapping between them. All permissions are resolved within the context of the container
@ -293,7 +297,7 @@ As a result, the following storage functionality is not supported on Windows nod
* NFS based storage/volume support
* Expanding the mounted volume (resizefs)
#### Networking
#### Networking {#networking-limitations}
Windows Container Networking differs in some important ways from Linux networking. The [Microsoft documentation for Windows Container Networking](https://docs.microsoft.com/en-us/virtualization/windowscontainers/container-networking/architecture) contains additional details and background.
@ -332,10 +336,11 @@ These features were added in Kubernetes v1.15:
* On Windows, there are multiple DNS resolvers that can be used. As these come with slightly different behaviors, using the `Resolve-DNSName` utility for name query resolutions is recommended.
##### IPv6
Kubernetes on Windows does not support single-stack "IPv6-only" networking. However, dual-stack IPv4/IPv6 networking for pods and nodes with single-family services is supported. See [IPv4/IPv6 dual-stack networking](#ipv4ipv6-dual-stack) for more details.
##### Session affinity
Setting the maximum session sticky time for Windows services using `service.spec.sessionAffinityConfig.clientIP.timeoutSeconds` is not supported.
##### Security
@ -345,7 +350,7 @@ Secrets are written in clear text on the node's volume (as compared to tmpfs/in-
1. Use file ACLs to secure the secrets file location
2. Use volume-level encryption using [BitLocker](https://docs.microsoft.com/en-us/windows/security/information-protection/bitlocker/bitlocker-how-to-deploy-on-windows-server)
[RunAsUser ](/docs/concepts/policy/pod-security-policy/#users-and-groups)is not currently supported on Windows. The workaround is to create local accounts before packaging the container. The RunAsUsername capability may be added in a future release.
[RunAsUsername](/docs/tasks/configure-pod-container/configure-runasusername) can be specified for Windows Pods or containers to execute the container processes as a node-default user. This is roughly equivalent to [RunAsUser](/docs/concepts/policy/pod-security-policy/#users-and-groups).
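A minimal sketch of that capability; the Pod name, image, and username below are illustrative only and assume a cluster with at least one Windows node:
```shell
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: run-as-username-demo
spec:
  securityContext:
    windowsOptions:
      runAsUserName: "ContainerUser"
  nodeSelector:
    kubernetes.io/os: windows
  containers:
  - name: iis
    image: mcr.microsoft.com/windows/servercore/iis:windowsservercore-ltsc2019
EOF
```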
Linux specific pod security context privileges such as SELinux, AppArmor, Seccomp, Capabilities (POSIX Capabilities), and others are not supported.
@ -458,6 +463,7 @@ Your main source of help for troubleshooting your Kubernetes cluster should star
```
If the above referenced script is not suitable, you can manually configure nssm.exe using the following examples.
```powershell
# Register flanneld.exe
nssm install flanneld C:\flannel\flanneld.exe
@ -467,9 +473,9 @@ Your main source of help for troubleshooting your Kubernetes cluster should star
nssm start flanneld
# Register kubelet.exe
# Microsoft releases the pause infrastructure container at mcr.microsoft.com/k8s/core/pause:1.2.0
# Microsoft releases the pause infrastructure container at mcr.microsoft.com/oss/kubernetes/pause:1.4.1
nssm install kubelet C:\k\kubelet.exe
nssm set kubelet AppParameters --hostname-override=<hostname> --v=6 --pod-infra-container-image=mcr.microsoft.com/k8s/core/pause:1.2.0 --resolv-conf="" --allow-privileged=true --enable-debugging-handlers --cluster-dns=<DNS-service-IP> --cluster-domain=cluster.local --kubeconfig=c:\k\config --hairpin-mode=promiscuous-bridge --image-pull-progress-deadline=20m --cgroups-per-qos=false --log-dir=<log directory> --logtostderr=false --enforce-node-allocatable="" --network-plugin=cni --cni-bin-dir=c:\k\cni --cni-conf-dir=c:\k\cni\config
nssm set kubelet AppParameters --hostname-override=<hostname> --v=6 --pod-infra-container-image=mcr.microsoft.com/oss/kubernetes/pause:1.4.1 --resolv-conf="" --allow-privileged=true --enable-debugging-handlers --cluster-dns=<DNS-service-IP> --cluster-domain=cluster.local --kubeconfig=c:\k\config --hairpin-mode=promiscuous-bridge --image-pull-progress-deadline=20m --cgroups-per-qos=false --log-dir=<log directory> --logtostderr=false --enforce-node-allocatable="" --network-plugin=cni --cni-bin-dir=c:\k\cni --cni-conf-dir=c:\k\cni\config
nssm set kubelet AppDirectory C:\k
nssm start kubelet
@ -489,7 +495,6 @@ Your main source of help for troubleshooting your Kubernetes cluster should star
nssm start kube-proxy
```
For initial troubleshooting, you can use the following flags in [nssm.exe](https://nssm.cc/) to redirect stdout and stderr to an output file:
```powershell
@ -551,7 +556,7 @@ Your main source of help for troubleshooting your Kubernetes cluster should star
1. My Windows Pods cannot launch because of missing `/run/flannel/subnet.env`
This indicates that Flannel didn't launch correctly. You can either try to restart flanneld.exe or you can copy the files over manually from `/run/flannel/subnet.env` on the Kubernetes master to` C:\run\flannel\subnet.env` on the Windows worker node and modify the `FLANNEL_SUBNET` row to a different number. For example, if node subnet 10.244.4.1/24 is desired:
This indicates that Flannel didn't launch correctly. You can either try to restart flanneld.exe or you can copy the files over manually from `/run/flannel/subnet.env` on the Kubernetes master to `C:\run\flannel\subnet.env` on the Windows worker node and modify the `FLANNEL_SUBNET` row to a different number. For example, if node subnet 10.244.4.1/24 is desired:
```env
FLANNEL_NETWORK=10.244.0.0/16
@ -579,16 +584,14 @@ Your main source of help for troubleshooting your Kubernetes cluster should star
Check that your pause image is compatible with your OS version. The [instructions](https://docs.microsoft.com/en-us/virtualization/windowscontainers/kubernetes/deploying-resources) assume that both the OS and the containers are version 1803. If you have a later version of Windows, such as an Insider build, you need to adjust the images accordingly. Please refer to the Microsoft's [Docker repository](https://hub.docker.com/u/microsoft/) for images. Regardless, both the pause image Dockerfile and the sample service expect the image to be tagged as :latest.
Starting with Kubernetes v1.14, Microsoft releases the pause infrastructure container at `mcr.microsoft.com/k8s/core/pause:1.2.0`.
1. DNS resolution is not properly working
Check the DNS limitations for Windows in this [section](#dns-limitations).
1. `kubectl port-forward` fails with "unable to do port forwarding: wincat not found"
This was implemented in Kubernetes 1.15, and the pause infrastructure container `mcr.microsoft.com/k8s/core/pause:1.2.0`. Be sure to use these versions or newer ones.
If you would like to build your own pause infrastructure container, be sure to include [wincat](https://github.com/kubernetes-sigs/sig-windows-tools/tree/master/cmd/wincat)
This was implemented in Kubernetes 1.15 by including wincat.exe in the pause infrastructure container `mcr.microsoft.com/oss/kubernetes/pause:1.4.1`. Be sure to use these versions or newer ones.
If you would like to build your own pause infrastructure container be sure to include [wincat](https://github.com/kubernetes-sigs/sig-windows-tools/tree/master/cmd/wincat).
1. My Kubernetes installation is failing because my Windows Server node is behind a proxy
@ -603,7 +606,7 @@ Your main source of help for troubleshooting your Kubernetes cluster should star
In a Kubernetes Pod, an infrastructure or "pause" container is first created to host the container endpoint. Containers that belong to the same pod, including infrastructure and worker containers, share a common network namespace and endpoint (same IP and port space). Pause containers are needed to accommodate worker containers crashing or restarting without losing any of the networking configuration.
The "pause" (infrastructure) image is hosted on Microsoft Container Registry (MCR). You can access it using `docker pull mcr.microsoft.com/k8s/core/pause:1.2.0`. For more details, see the [DOCKERFILE](https://github.com/kubernetes-sigs/windows-testing/blob/master/images/pause/Dockerfile).
The "pause" (infrastructure) image is hosted on Microsoft Container Registry (MCR). You can access it using `mcr.microsoft.com/oss/kubernetes/pause:1.4.1`. For more details, see the [DOCKERFILE](https://github.com/kubernetes-sigs/windows-testing/blob/master/images/pause/Dockerfile).
### Further investigation
@ -625,11 +628,8 @@ If filing a bug, please include detailed information about how to reproduce the
* [Relevant logs](https://github.com/kubernetes/community/blob/master/sig-windows/CONTRIBUTING.md#gathering-logs)
* Tag the issue sig/windows by commenting on the issue with `/sig windows` to bring it to a SIG-Windows member's attention
## {{% heading "whatsnext" %}}
We have a lot of features in our roadmap. An abbreviated high level list is included below, but we encourage you to view our [roadmap project](https://github.com/orgs/kubernetes/projects/8) and help us make Windows support better by [contributing](https://github.com/kubernetes/community/blob/master/sig-windows/).
### Hyper-V isolation
@ -641,31 +641,7 @@ Hyper-V isolation is requried to enable the following use cases for Windows cont
* Specific CPU/NUMA settings for a pod
* Memory isolation and reservations
The existing Hyper-V isolation support, an experimental feature as of v1.10, will be deprecated in the future in favor of the CRI-ContainerD and RuntimeClass features mentioned above. To use the current features and create a Hyper-V isolated container, the kubelet should be started with the feature gate `HyperVContainer=true` and the Pod should include the annotation `experimental.windows.kubernetes.io/isolation-type=hyperv`. In the experimental release, this feature is limited to 1 container per Pod.
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: iis
spec:
selector:
matchLabels:
app: iis
replicas: 3
template:
metadata:
labels:
app: iis
annotations:
experimental.windows.kubernetes.io/isolation-type: hyperv
spec:
containers:
- name: iis
image: microsoft/iis
ports:
- containerPort: 80
```
Hyper-V isolation support will be added in a later release and will require CRI-Containerd.
### Deployment with kubeadm and cluster API
@ -674,10 +650,3 @@ cluster. Windows node support in kubeadm is currently a work-in-progress but a
guide is available [here](/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes/).
We are also making investments in cluster API to ensure Windows nodes are
properly provisioned.
### A few other key features
* Beta support for Group Managed Service Accounts
* More CNIs
* More Storage Plugins

View File

@ -177,7 +177,7 @@ This label reflects the Windows major, minor, and build number that need to matc
1. Save this file to `runtimeClasses.yml`. It includes the appropriate `nodeSelector` for the Windows OS, architecture, and version.
```yaml
apiVersion: node.k8s.io/v1beta1
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
name: windows-2019

View File

@ -31,9 +31,11 @@ resources instead of a single large Endpoints resource.
{{< feature-state for_k8s_version="v1.17" state="beta" >}}
{{< note >}}
Although EndpointSlices may eventually replace Endpoints, many Kubernetes
components still rely on Endpoints. For now, enabling EndpointSlices should be
seen as an addition to Endpoints in a cluster, not a replacement for them.
The EndpointSlice resource was designed to address shortcomings in an earlier
resource: Endpoints. Some Kubernetes components and third-party applications
continue to use and rely on Endpoints. Whilst that remains the case,
EndpointSlices should be seen as an addition to Endpoints in a cluster, not as
an outright replacement.
{{< /note >}}
EndpointSlice functionality in Kubernetes is made up of several different
@ -60,13 +62,23 @@ components, most are enabled by default:
gate](/docs/reference/command-line-tools-reference/feature-gates/) on
kube-proxy.
## API fields
Some fields in the EndpointSlice API are feature-gated.
- The `EndpointSliceNodeName` feature gate controls access to the `nodeName`
field. This is an alpha feature that is disabled by default.
- The `EndpointSliceTerminating` feature gate controls access to the `serving`
and `terminating` condition fields. This is an alpha feature that is disabled
by default.
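As a sketch, assuming you start the kube-apiserver directly (kubeadm and managed platforms pass component flags differently), the two gates named above can be enabled like this; components that consume the new fields, such as kube-proxy, take the same `--feature-gates` flag:
```shell
kube-apiserver \
--feature-gates=EndpointSliceNodeName=true,EndpointSliceTerminating=true \
# …and other flags as usual
```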
## Using EndpointSlices
With EndpointSlices fully enabled in your cluster, you should see corresponding
EndpointSlice resources for each Endpoints resource. In addition to supporting
existing Endpoints functionality, EndpointSlices include new bits of information
such as topology. They will allow for greater scalability and extensibility of
network endpoints in your cluster.
existing Endpoints functionality, EndpointSlices will allow for greater
scalability and extensibility of network endpoints in your cluster.
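For example, the EndpointSlices backing a particular Service can be listed by the `kubernetes.io/service-name` label; `example-service` below is an illustrative name:
```shell
# List EndpointSlices owned by the Service "example-service" in the
# current namespace.
kubectl get endpointslices -l kubernetes.io/service-name=example-service
```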
## {{% heading "whatsnext" %}}

View File

@ -170,6 +170,39 @@ to access a replica via its ephemeral public IP, you must skip TLS verification.
To allow etcd clustering, ports needed to communicate between etcd instances will be opened (for inside cluster communication).
To make such deployment secure, communication between etcd instances is authorized using SSL.
### API server identity
{{< feature-state state="alpha" for_k8s_version="v1.20" >}}
The API Server Identity feature is controlled by a
[feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
and is not enabled by default. You can activate API Server Identity by enabling
the feature gate named `APIServerIdentity` when you start the
{{< glossary_tooltip text="API Server" term_id="kube-apiserver" >}}:
```shell
kube-apiserver \
--feature-gates=APIServerIdentity=true \
# …and other flags as usual
```
During bootstrap, each kube-apiserver assigns a unique ID to itself. The ID is
in the format of `kube-apiserver-{UUID}`. Each kube-apiserver creates a
[Lease](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#lease-v1-coordination-k8s-io)
in the _kube-system_ {{< glossary_tooltip text="namespace" term_id="namespace">}}.
The Lease name is the unique ID for the kube-apiserver. The Lease contains a
label `k8s.io/component=kube-apiserver`. Each kube-apiserver refreshes its
Lease every `IdentityLeaseRenewIntervalSeconds` (defaults to 10s). Each
kube-apiserver also checks all the kube-apiserver identity Leases every
`IdentityLeaseDurationSeconds` (defaults to 3600s), and deletes Leases that
have not been refreshed for more than `IdentityLeaseDurationSeconds`.
`IdentityLeaseRenewIntervalSeconds` and `IdentityLeaseDurationSeconds` can be
configured by kube-apiserver flags `identity-lease-renew-interval-seconds`
and `identity-lease-duration-seconds`.
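A quick sketch of how the resulting Leases can be inspected, using the label mentioned above:
```shell
# List the identity Leases that each kube-apiserver maintains.
kubectl -n kube-system get lease -l k8s.io/component=kube-apiserver
```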
Enabling this feature is a prerequisite for using features that involve HA API
server coordination (for example, the `StorageVersionAPI` feature gate).
## Additional reading
[Automated HA master deployment - design doc](https://git.k8s.io/community/contributors/design-proposals/cluster-lifecycle/ha_master.md)

View File

@ -138,31 +138,91 @@ curl -L https://github.com/kubernetes-sigs/sig-windows-tools/releases/latest/dow
### Joining a Windows worker node
{{< note >}}
You must install the `Containers` feature and install Docker. Instructions
to do so are available at [Install Docker Engine - Enterprise on Windows Servers](https://hub.docker.com/editions/enterprise/docker-ee-server-windows).
{{< /note >}}
{{< note >}}
All code snippets in Windows sections are to be run in a PowerShell environment
with elevated permissions (Administrator) on the Windows worker node.
{{< /note >}}
1. Install wins, kubelet, and kubeadm.
{{< tabs name="tab-windows-kubeadm-runtime-installation" >}}
{{% tab name="Docker EE" %}}
```PowerShell
curl.exe -LO https://github.com/kubernetes-sigs/sig-windows-tools/releases/latest/download/PrepareNode.ps1
.\PrepareNode.ps1 -KubernetesVersion {{< param "fullversion" >}}
```
#### Install Docker EE
1. Run `kubeadm` to join the node
Install the `Containers` feature
Use the command that was given to you when you ran `kubeadm init` on a control plane host.
If you no longer have this command, or the token has expired, you can run `kubeadm token create --print-join-command`
(on a control plane host) to generate a new token and join command.
```powershell
Install-WindowsFeature -Name containers
```
Install Docker
Instructions to do so are available at [Install Docker Engine - Enterprise on Windows Servers](https://hub.docker.com/editions/enterprise/docker-ee-server-windows).
#### Install wins, kubelet, and kubeadm
```PowerShell
curl.exe -LO https://github.com/kubernetes-sigs/sig-windows-tools/releases/latest/download/PrepareNode.ps1
.\PrepareNode.ps1 -KubernetesVersion {{< param "fullversion" >}}
```
#### Run `kubeadm` to join the node
Use the command that was given to you when you ran `kubeadm init` on a control plane host.
If you no longer have this command, or the token has expired, you can run `kubeadm token create --print-join-command`
(on a control plane host) to generate a new token and join command.
{{% /tab %}}
{{% tab name="CRI-containerD" %}}
#### Install containerD
```powershell
curl.exe -LO https://github.com/kubernetes-sigs/sig-windows-tools/releases/latest/download/Install-Containerd.ps1
.\Install-Containerd.ps1
```
{{< note >}}
To install a specific version of containerD, specify the version with `-ContainerDVersion`.
```powershell
# Example
.\Install-Containerd.ps1 -ContainerDVersion v1.4.1
```
{{< /note >}}
{{< note >}}
If you're using an interface other than Ethernet (for example, "Ethernet0 2") on the Windows nodes, specify the name with `-netAdapterName`.
```powershell
# Example
.\Install-Containerd.ps1 -netAdapterName "Ethernet0 2"
```
{{< /note >}}
#### Install wins, kubelet, and kubeadm
```PowerShell
curl.exe -LO https://github.com/kubernetes-sigs/sig-windows-tools/releases/latest/download/PrepareNode.ps1
.\PrepareNode.ps1 -KubernetesVersion {{< param "fullversion" >}} -ContainerRuntime containerD
```
#### Run `kubeadm` to join the node
Use the command that was given to you when you ran `kubeadm init` on a control plane host.
If you no longer have this command, or the token has expired, you can run `kubeadm token create --print-join-command`
(on a control plane host) to generate a new token and join command.
{{< note >}}
If you are using **CRI-containerD**, add `--cri-socket "npipe:////./pipe/containerd-containerd"` to the `kubeadm join` call, as in the sketch after this note.
{{< /note >}}
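A sketch of the resulting join command, run from an elevated PowerShell prompt on the Windows node; the endpoint, token, and hash are placeholders taken from your own `kubeadm init` output:
```shell
kubeadm join <control-plane-host>:<control-plane-port> --token <token> --discovery-token-ca-cert-hash sha256:<hash> --cri-socket "npipe:////./pipe/containerd-containerd"
```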
{{% /tab %}}
{{< /tabs >}}
### Verifying your installation
#### Verifying your installation
You should now be able to view the Windows node in your cluster by running:
```bash
@ -178,11 +238,6 @@ kubectl -n kube-system get pods -l app=flannel
Once the flannel Pod is running, your node should enter the `Ready` state and then be available to handle workloads.
## {{% heading "whatsnext" %}}
- [Upgrading Windows kubeadm nodes](/docs/tasks/administer-cluster/kubeadm/upgrading-windows-nodes)

View File

@ -52,7 +52,7 @@ setting up a cluster to use an external CA.
You can use the `check-expiration` subcommand to check when certificates expire:
```
kubeadm alpha certs check-expiration
kubeadm certs check-expiration
```
The output is similar to this:
@ -120,7 +120,7 @@ command. In that case, you should explicitly set `--certificate-renewal=true`.
## Manual certificate renewal
You can renew your certificates manually at any time with the `kubeadm alpha certs renew` command.
You can renew your certificates manually at any time with the `kubeadm certs renew` command.
This command performs the renewal using CA (or front-proxy-CA) certificate and key stored in `/etc/kubernetes/pki`.
@ -129,10 +129,10 @@ If you are running an HA cluster, this command needs to be executed on all the c
{{< /warning >}}
{{< note >}}
`alpha certs renew` uses the existing certificates as the authoritative source for attributes (Common Name, Organization, SAN, etc.) instead of the kubeadm-config ConfigMap. It is strongly recommended to keep them both in sync.
`certs renew` uses the existing certificates as the authoritative source for attributes (Common Name, Organization, SAN, etc.) instead of the kubeadm-config ConfigMap. It is strongly recommended to keep them both in sync.
{{< /note >}}
`kubeadm alpha certs renew` provides the following options:
`kubeadm certs renew` provides the following options:
The Kubernetes certificates normally reach their expiration date after one year.
@ -170,14 +170,14 @@ controllerManager:
### Create certificate signing requests (CSR)
You can create the certificate signing requests for the Kubernetes certificates API with `kubeadm alpha certs renew --use-api`.
You can create the certificate signing requests for the Kubernetes certificates API with `kubeadm certs renew --use-api`.
If you set up an external signer such as [cert-manager](https://github.com/jetstack/cert-manager), certificate signing requests (CSRs) are automatically approved.
Otherwise, you must manually approve certificates with the [`kubectl certificate`](/docs/setup/best-practices/certificates/) command.
The following kubeadm command outputs the name of the certificate to approve, then blocks and waits for approval to occur:
```shell
sudo kubeadm alpha certs renew apiserver --use-api &
sudo kubeadm certs renew apiserver --use-api &
```
The output is similar to this:
```
@ -211,13 +211,13 @@ In kubeadm terms, any certificate that would normally be signed by an on-disk CA
### Create certificate signing requests (CSR)
You can create certificate signing requests with `kubeadm alpha certs renew --csr-only`.
You can create certificate signing requests with `kubeadm certs renew --csr-only`.
Both the CSR and the accompanying private key are given in the output.
You can pass in a directory with `--csr-dir` to output the CSRs to the specified location.
If `--csr-dir` is not specified, the default certificate directory (`/etc/kubernetes/pki`) is used.
Certificates can be renewed with `kubeadm alpha certs renew --csr-only`.
Certificates can be renewed with `kubeadm certs renew --csr-only`.
As with `kubeadm init`, an output directory can be specified with the `--csr-dir` flag.
A CSR contains a certificate's name, domains, and IPs, but it does not specify usages.
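As a sketch, using an illustrative output directory:
```shell
# Write a CSR and key for the API server certificate to /tmp/kubeadm-csr
# instead of renewing the certificate in place.
sudo kubeadm certs renew apiserver --csr-only --csr-dir /tmp/kubeadm-csr
```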

View File

@ -4,85 +4,88 @@ reviewers:
title: Upgrading kubeadm clusters
content_type: task
weight: 20
min-kubernetes-server-version: 1.19
---
<!-- overview -->
This page explains how to upgrade a Kubernetes cluster created with kubeadm from version
1.18.x to version 1.19.x, and from version 1.19.x to 1.19.y (where `y > x`).
{{< skew latestVersionAddMinor -1 >}}.x to version {{< skew latestVersion >}}.x, and from version
{{< skew latestVersion >}}.x to {{< skew latestVersion >}}.y (where `y > x`). Skipping MINOR versions
when upgrading is unsupported.
To see information about upgrading clusters created using older versions of kubeadm,
please refer to following pages instead:
- [Upgrading kubeadm cluster from 1.17 to 1.18](https://v1-18.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/)
- [Upgrading kubeadm cluster from 1.16 to 1.17](https://v1-17.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/)
- [Upgrading kubeadm cluster from 1.15 to 1.16](https://v1-16.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/)
- [Upgrading kubeadm cluster from 1.14 to 1.15](https://v1-15.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-15/)
- [Upgrading kubeadm cluster from 1.13 to 1.14](https://v1-15.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-14/)
- [Upgrading a kubeadm cluster from {{< skew latestVersionAddMinor -2 >}} to {{< skew latestVersionAddMinor -1 >}}](https://v{{< skew latestVersionAddMinor -1 "-" >}}.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/)
- [Upgrading a kubeadm cluster from {{< skew latestVersionAddMinor -3 >}} to {{< skew latestVersionAddMinor -2 >}}](https://v{{< skew latestVersionAddMinor -2 "-" >}}.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/)
- [Upgrading a kubeadm cluster from {{< skew latestVersionAddMinor -4 >}} to {{< skew latestVersionAddMinor -3 >}}](https://v{{< skew latestVersionAddMinor -3 "-" >}}.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/)
- [Upgrading a kubeadm cluster from {{< skew latestVersionAddMinor -5 >}} to {{< skew latestVersionAddMinor -4 >}}](https://v{{< skew latestVersionAddMinor -4 "-" >}}.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/)
The upgrade workflow at high level is the following:
1. Upgrade the primary control plane node.
1. Upgrade a primary control plane node.
1. Upgrade additional control plane nodes.
1. Upgrade worker nodes.
## {{% heading "prerequisites" %}}
- You need to have a kubeadm Kubernetes cluster running version 1.18.0 or later.
- [Swap must be disabled](https://serverfault.com/questions/684771/best-way-to-disable-swap-in-linux).
- The cluster should use a static control plane and etcd pods or external etcd.
- Make sure you read the [release notes]({{< latest-release-notes >}}) carefully.
- The cluster should use a static control plane and etcd pods or external etcd.
- Make sure to back up any important components, such as app-level state stored in a database.
`kubeadm upgrade` does not touch your workloads, only components internal to Kubernetes, but backups are always a best practice.
- [Swap must be disabled](https://serverfault.com/questions/684771/best-way-to-disable-swap-in-linux).
### Additional information
- [Draining nodes](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) before kubelet MINOR version
upgrades is required. In the case of control plane nodes, they could be running CoreDNS Pods or other critical workloads.
- All containers are restarted after upgrade, because the container spec hash value is changed.
- You only can upgrade from one MINOR version to the next MINOR version,
or between PATCH versions of the same MINOR. That is, you cannot skip MINOR versions when you upgrade.
For example, you can upgrade from 1.y to 1.y+1, but not from 1.y to 1.y+2.
<!-- steps -->
## Determine which version to upgrade to
Find the latest stable 1.19 version:
Find the latest stable {{< skew latestVersion >}} version using the OS package manager:
{{< tabs name="k8s_install_versions" >}}
{{% tab name="Ubuntu, Debian or HypriotOS" %}}
apt update
apt-cache madison kubeadm
# find the latest 1.19 version in the list
# it should look like 1.19.x-00, where x is the latest patch
# find the latest {{< skew latestVersion >}} version in the list
# it should look like {{< skew latestVersion >}}.x-00, where x is the latest patch
{{% /tab %}}
{{% tab name="CentOS, RHEL or Fedora" %}}
yum list --showduplicates kubeadm --disableexcludes=kubernetes
# find the latest 1.19 version in the list
# it should look like 1.19.x-0, where x is the latest patch
# find the latest {{< skew latestVersion >}} version in the list
# it should look like {{< skew latestVersion >}}.x-0, where x is the latest patch
{{% /tab %}}
{{< /tabs >}}
## Upgrading control plane nodes
### Upgrade the first control plane node
The upgrade procedure on control plane nodes should be executed one node at a time.
Pick a control plane node that you wish to upgrade first. It must have the `/etc/kubernetes/admin.conf` file.
- On your first control plane node, upgrade kubeadm:
### Call "kubeadm upgrade"
**For the first control plane node**
- Upgrade kubeadm:
{{< tabs name="k8s_install_kubeadm_first_cp" >}}
{{% tab name="Ubuntu, Debian or HypriotOS" %}}
# replace x in 1.19.x-00 with the latest patch version
# replace x in {{< skew latestVersion >}}.x-00 with the latest patch version
apt-mark unhold kubeadm && \
apt-get update && apt-get install -y kubeadm=1.19.x-00 && \
apt-get update && apt-get install -y kubeadm={{< skew latestVersion >}}.x-00 && \
apt-mark hold kubeadm
# since apt-get version 1.1 you can also use the following method
apt-get update && \
apt-get install -y --allow-change-held-packages kubeadm=1.19.x-00
apt-get install -y --allow-change-held-packages kubeadm={{< skew latestVersion >}}.x-00
{{% /tab %}}
{{% tab name="CentOS, RHEL or Fedora" %}}
# replace x in 1.19.x-0 with the latest patch version
yum install -y kubeadm-1.19.x-0 --disableexcludes=kubernetes
# replace x in {{< skew latestVersion >}}.x-0 with the latest patch version
yum install -y kubeadm-{{< skew latestVersion >}}.x-0 --disableexcludes=kubernetes
{{% /tab %}}
{{< /tabs >}}
@ -92,63 +95,10 @@ Find the latest stable 1.19 version:
kubeadm version
```
- Drain the control plane node:
- Verify the upgrade plan:
```shell
# replace <cp-node-name> with the name of your control plane node
kubectl drain <cp-node-name> --ignore-daemonsets
```
- On the control plane node, run:
```shell
sudo kubeadm upgrade plan
```
You should see output similar to this:
```
[upgrade/config] Making sure the configuration is correct:
[upgrade/config] Reading configuration from the cluster...
[upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[preflight] Running pre-flight checks.
[upgrade] Running cluster health checks
[upgrade] Fetching available versions to upgrade to
[upgrade/versions] Cluster version: v1.18.4
[upgrade/versions] kubeadm version: v1.19.0
[upgrade/versions] Latest stable version: v1.19.0
[upgrade/versions] Latest version in the v1.18 series: v1.18.4
Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply':
COMPONENT CURRENT AVAILABLE
Kubelet 1 x v1.18.4 v1.19.0
Upgrade to the latest version in the v1.18 series:
COMPONENT CURRENT AVAILABLE
API Server v1.18.4 v1.19.0
Controller Manager v1.18.4 v1.19.0
Scheduler v1.18.4 v1.19.0
Kube Proxy v1.18.4 v1.19.0
CoreDNS 1.6.7 1.7.0
Etcd 3.4.3-0 3.4.7-0
You can now apply the upgrade by executing the following command:
kubeadm upgrade apply v1.19.0
_____________________________________________________________________
The table below shows the current state of component configs as understood by this version of kubeadm.
Configs that have a "yes" mark in the "MANUAL UPGRADE REQUIRED" column require manual config upgrade or
resetting to kubeadm defaults before a successful upgrade can be performed. The version to manually
upgrade to is denoted in the "PREFERRED VERSION" column.
API GROUP CURRENT VERSION PREFERRED VERSION MANUAL UPGRADE REQUIRED
kubeproxy.config.k8s.io v1alpha1 v1alpha1 no
kubelet.config.k8s.io v1beta1 v1beta1 no
_____________________________________________________________________
kubeadm upgrade plan
```
This command checks that your cluster can be upgraded, and fetches the versions you can upgrade to.
@ -170,90 +120,13 @@ Failing to do so will cause `kubeadm upgrade apply` to exit with an error and no
```shell
# replace x with the patch version you picked for this upgrade
sudo kubeadm upgrade apply v1.19.x
sudo kubeadm upgrade apply v{{< skew latestVersion >}}.x
```
You should see output similar to this:
Once the command finishes you should see:
```
[upgrade/config] Making sure the configuration is correct:
[upgrade/config] Reading configuration from the cluster...
[upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[preflight] Running pre-flight checks.
[upgrade] Running cluster health checks
[upgrade/version] You have chosen to change the cluster version to "v1.19.0"
[upgrade/versions] Cluster version: v1.18.4
[upgrade/versions] kubeadm version: v1.19.0
[upgrade/confirm] Are you sure you want to proceed with the upgrade? [y/N]: y
[upgrade/prepull] Pulling images required for setting up a Kubernetes cluster
[upgrade/prepull] This might take a minute or two, depending on the speed of your internet connection
[upgrade/prepull] You can also perform this action in beforehand using 'kubeadm config images pull'
[upgrade/apply] Upgrading your Static Pod-hosted control plane to version "v1.19.0"...
Static pod: kube-apiserver-kind-control-plane hash: b4c8effe84b4a70031f9a49a20c8b003
Static pod: kube-controller-manager-kind-control-plane hash: 9ac092f0ca813f648c61c4d5fcbf39f2
Static pod: kube-scheduler-kind-control-plane hash: 7da02f2c78da17af7c2bf1533ecf8c9a
[upgrade/etcd] Upgrading to TLS for etcd
Static pod: etcd-kind-control-plane hash: 171c56cd0e81c0db85e65d70361ceddf
[upgrade/staticpods] Preparing for "etcd" upgrade
[upgrade/staticpods] Renewing etcd-server certificate
[upgrade/staticpods] Renewing etcd-peer certificate
[upgrade/staticpods] Renewing etcd-healthcheck-client certificate
[upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/etcd.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2020-07-13-16-24-16/etcd.yaml"
[upgrade/staticpods] Waiting for the kubelet to restart the component
[upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s)
Static pod: etcd-kind-control-plane hash: 171c56cd0e81c0db85e65d70361ceddf
Static pod: etcd-kind-control-plane hash: 171c56cd0e81c0db85e65d70361ceddf
Static pod: etcd-kind-control-plane hash: 59e40b2aab1cd7055e64450b5ee438f0
[apiclient] Found 1 Pods for label selector component=etcd
[upgrade/staticpods] Component "etcd" upgraded successfully!
[upgrade/etcd] Waiting for etcd to become available
[upgrade/staticpods] Writing new Static Pod manifests to "/etc/kubernetes/tmp/kubeadm-upgraded-manifests999800980"
[upgrade/staticpods] Preparing for "kube-apiserver" upgrade
[upgrade/staticpods] Renewing apiserver certificate
[upgrade/staticpods] Renewing apiserver-kubelet-client certificate
[upgrade/staticpods] Renewing front-proxy-client certificate
[upgrade/staticpods] Renewing apiserver-etcd-client certificate
[upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-apiserver.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2020-07-13-16-24-16/kube-apiserver.yaml"
[upgrade/staticpods] Waiting for the kubelet to restart the component
[upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s)
Static pod: kube-apiserver-kind-control-plane hash: b4c8effe84b4a70031f9a49a20c8b003
Static pod: kube-apiserver-kind-control-plane hash: b4c8effe84b4a70031f9a49a20c8b003
Static pod: kube-apiserver-kind-control-plane hash: b4c8effe84b4a70031f9a49a20c8b003
Static pod: kube-apiserver-kind-control-plane hash: b4c8effe84b4a70031f9a49a20c8b003
Static pod: kube-apiserver-kind-control-plane hash: f717874150ba572f020dcd89db8480fc
[apiclient] Found 1 Pods for label selector component=kube-apiserver
[upgrade/staticpods] Component "kube-apiserver" upgraded successfully!
[upgrade/staticpods] Preparing for "kube-controller-manager" upgrade
[upgrade/staticpods] Renewing controller-manager.conf certificate
[upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-controller-manager.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2020-07-13-16-24-16/kube-controller-manager.yaml"
[upgrade/staticpods] Waiting for the kubelet to restart the component
[upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s)
Static pod: kube-controller-manager-kind-control-plane hash: 9ac092f0ca813f648c61c4d5fcbf39f2
Static pod: kube-controller-manager-kind-control-plane hash: b155b63c70e798b806e64a866e297dd0
[apiclient] Found 1 Pods for label selector component=kube-controller-manager
[upgrade/staticpods] Component "kube-controller-manager" upgraded successfully!
[upgrade/staticpods] Preparing for "kube-scheduler" upgrade
[upgrade/staticpods] Renewing scheduler.conf certificate
[upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-scheduler.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2020-07-13-16-24-16/kube-scheduler.yaml"
[upgrade/staticpods] Waiting for the kubelet to restart the component
[upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s)
Static pod: kube-scheduler-kind-control-plane hash: 7da02f2c78da17af7c2bf1533ecf8c9a
Static pod: kube-scheduler-kind-control-plane hash: 260018ac854dbf1c9fe82493e88aec31
[apiclient] Found 1 Pods for label selector component=kube-scheduler
[upgrade/staticpods] Component "kube-scheduler" upgraded successfully!
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.19" in namespace kube-system with the configuration for the kubelets in the cluster
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
W0713 16:26:14.074656 2986 dns.go:282] the CoreDNS Configuration will not be migrated due to unsupported version of CoreDNS. The existing CoreDNS Corefile configuration and deployment has been retained.
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
[upgrade/successful] SUCCESS! Your cluster was upgraded to "v1.19.0". Enjoy!
[upgrade/successful] SUCCESS! Your cluster was upgraded to "v{{< skew latestVersion >}}.x". Enjoy!
[upgrade/kubelet] Now that your control plane is upgraded, please proceed with upgrading your kubelets if you haven't already done so.
```
@ -266,14 +139,7 @@ Failing to do so will cause `kubeadm upgrade apply` to exit with an error and no
This step is not required on additional control plane nodes if the CNI provider runs as a DaemonSet.
- Uncordon the control plane node:
```shell
# replace <cp-node-name> with the name of your control plane node
kubectl uncordon <cp-node-name>
```
### Upgrade additional control plane nodes
**For the other control plane nodes**
Same as the first control plane node but use:
@ -287,35 +153,57 @@ instead of:
sudo kubeadm upgrade apply
```
Also `sudo kubeadm upgrade plan` is not needed.
Also, calling `kubeadm upgrade plan` and upgrading the CNI provider plugin are no longer needed.
### Drain the node
- Prepare the node for maintenance by marking it unschedulable and evicting the workloads:
```shell
# replace <node-to-drain> with the name of your node you are draining
kubectl drain <node-to-drain> --ignore-daemonsets
```
### Upgrade kubelet and kubectl
Upgrade the kubelet and kubectl on all control plane nodes:
- Upgrade the kubelet and kubectl
{{< tabs name="k8s_install_kubelet" >}}
{{% tab name="Ubuntu, Debian or HypriotOS" %}}
# replace x in 1.19.x-00 with the latest patch version
{{< tab name="Ubuntu, Debian or HypriotOS" >}}
<pre>
# replace x in {{< skew latestVersion >}}.x-00 with the latest patch version
apt-mark unhold kubelet kubectl && \
apt-get update && apt-get install -y kubelet=1.19.x-00 kubectl=1.19.x-00 && \
apt-get update && apt-get install -y kubelet={{< skew latestVersion >}}.x-00 kubectl={{< skew latestVersion >}}.x-00 && \
apt-mark hold kubelet kubectl
-
# since apt-get version 1.1 you can also use the following method
apt-get update && \
apt-get install -y --allow-change-held-packages kubelet=1.19.x-00 kubectl=1.19.x-00
{{% /tab %}}
{{% tab name="CentOS, RHEL or Fedora" %}}
# replace x in 1.19.x-0 with the latest patch version
yum install -y kubelet-1.19.x-0 kubectl-1.19.x-0 --disableexcludes=kubernetes
{{% /tab %}}
apt-get install -y --allow-change-held-packages kubelet={{< skew latestVersion >}}.x-00 kubectl={{< skew latestVersion >}}.x-00
</pre>
{{< /tab >}}
{{< tab name="CentOS, RHEL or Fedora" >}}
<pre>
# replace x in {{< skew latestVersion >}}.x-0 with the latest patch version
yum install -y kubelet-{{< skew latestVersion >}}.x-0 kubectl-{{< skew latestVersion >}}.x-0 --disableexcludes=kubernetes
</pre>
{{< /tab >}}
{{< /tabs >}}
Restart the kubelet
- Restart the kubelet:
```shell
sudo systemctl daemon-reload
sudo systemctl restart kubelet
```
```shell
sudo systemctl daemon-reload
sudo systemctl restart kubelet
```
### Uncordon the node
- Bring the node back online by marking it schedulable:
```shell
# replace <node-to-drain> with the name of your node
kubectl uncordon <node-to-drain>
```
## Upgrade worker nodes
@ -324,22 +212,22 @@ without compromising the minimum required capacity for running your workloads.
### Upgrade kubeadm
- Upgrade kubeadm on all worker nodes:
- Upgrade kubeadm:
{{< tabs name="k8s_install_kubeadm_worker_nodes" >}}
{{% tab name="Ubuntu, Debian or HypriotOS" %}}
# replace x in 1.19.x-00 with the latest patch version
# replace x in {{< skew latestVersion >}}.x-00 with the latest patch version
apt-mark unhold kubeadm && \
apt-get update && apt-get install -y kubeadm=1.19.x-00 && \
apt-get update && apt-get install -y kubeadm={{< skew latestVersion >}}.x-00 && \
apt-mark hold kubeadm
-
# since apt-get version 1.1 you can also use the following method
apt-get update && \
apt-get install -y --allow-change-held-packages kubeadm=1.19.x-00
apt-get install -y --allow-change-held-packages kubeadm={{< skew latestVersion >}}.x-00
{{% /tab %}}
{{% tab name="CentOS, RHEL or Fedora" %}}
# replace x in 1.19.x-0 with the latest patch version
yum install -y kubeadm-1.19.x-0 --disableexcludes=kubernetes
# replace x in {{< skew latestVersion >}}.x-0 with the latest patch version
yum install -y kubeadm-{{< skew latestVersion >}}.x-0 --disableexcludes=kubernetes
{{% /tab %}}
{{< /tabs >}}
@ -352,17 +240,9 @@ without compromising the minimum required capacity for running your workloads.
kubectl drain <node-to-drain> --ignore-daemonsets
```
You should see output similar to this:
### Call "kubeadm upgrade"
```
node/ip-172-31-85-18 cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-system/kube-proxy-dj7d7, kube-system/weave-net-z65qx
node/ip-172-31-85-18 drained
```
### Upgrade the kubelet configuration
- Call the following command:
- For worker nodes this upgrades the local kubelet configuration:
```shell
sudo kubeadm upgrade node
@ -370,26 +250,26 @@ without compromising the minimum required capacity for running your workloads.
### Upgrade kubelet and kubectl
- Upgrade the kubelet and kubectl on all worker nodes:
- Upgrade the kubelet and kubectl:
{{< tabs name="k8s_kubelet_and_kubectl" >}}
{{% tab name="Ubuntu, Debian or HypriotOS" %}}
# replace x in 1.19.x-00 with the latest patch version
# replace x in {{< skew latestVersion >}}.x-00 with the latest patch version
apt-mark unhold kubelet kubectl && \
apt-get update && apt-get install -y kubelet=1.19.x-00 kubectl=1.19.x-00 && \
apt-get update && apt-get install -y kubelet={{< skew latestVersion >}}.x-00 kubectl={{< skew latestVersion >}}.x-00 && \
apt-mark hold kubelet kubectl
-
# since apt-get version 1.1 you can also use the following method
apt-get update && \
apt-get install -y --allow-change-held-packages kubelet=1.19.x-00 kubectl=1.19.x-00
apt-get install -y --allow-change-held-packages kubelet={{< skew latestVersion >}}.x-00 kubectl={{< skew latestVersion >}}.x-00
{{% /tab %}}
{{% tab name="CentOS, RHEL or Fedora" %}}
# replace x in 1.19.x-0 with the latest patch version
yum install -y kubelet-1.19.x-0 kubectl-1.19.x-0 --disableexcludes=kubernetes
# replace x in {{< skew latestVersion >}}.x-0 with the latest patch version
yum install -y kubelet-{{< skew latestVersion >}}.x-0 kubectl-{{< skew latestVersion >}}.x-0 --disableexcludes=kubernetes
{{% /tab %}}
{{< /tabs >}}
- Restart the kubelet
- Restart the kubelet:
```shell
sudo systemctl daemon-reload
@ -407,7 +287,8 @@ without compromising the minimum required capacity for running your workloads.
## Verify the status of the cluster
After the kubelet is upgraded on all nodes verify that all nodes are available again by running the following command from anywhere kubectl can access the cluster:
After the kubelet is upgraded on all nodes, verify that all nodes are available again by running the following command
from anywhere kubectl can access the cluster:
```shell
kubectl get nodes
@ -415,8 +296,6 @@ kubectl get nodes
The `STATUS` column should show `Ready` for all your nodes, and the version number should be updated.
## Recovering from a failure state
If `kubeadm upgrade` fails and does not roll back, for example because of an unexpected shutdown during execution, you can run `kubeadm upgrade` again.
@ -428,11 +307,11 @@ During upgrade kubeadm writes the following backup folders under `/etc/kubernete
- `kubeadm-backup-etcd-<date>-<time>`
- `kubeadm-backup-manifests-<date>-<time>`
`kubeadm-backup-etcd` contains a backup of the local etcd member data for this control-plane Node.
`kubeadm-backup-etcd` contains a backup of the local etcd member data for this control plane Node.
In case of an etcd upgrade failure and if the automatic rollback does not work, the contents of this folder
can be manually restored in `/var/lib/etcd`. In case external etcd is used this backup folder will be empty.
`kubeadm-backup-manifests` contains a backup of the static Pod manifest files for this control-plane Node.
`kubeadm-backup-manifests` contains a backup of the static Pod manifest files for this control plane Node.
In case of an upgrade failure and if the automatic rollback does not work, the contents of this folder can be
manually restored in `/etc/kubernetes/manifests`. If for some reason there is no difference between a pre-upgrade
and post-upgrade manifest file for a certain component, a backup file for it will not be written.

@ -51,19 +51,65 @@ The hint is then stored in the Topology Manager for use by the *Hint Providers*
Support for the Topology Manager requires `TopologyManager` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) to be enabled. It is enabled by default starting with Kubernetes 1.18.
### Topology Manager Policies
## Topology Manager Scopes and Policies
The Topology Manager currently:
- Aligns Pods of all QoS classes.
- Aligns the requested resources that Hint Provider provides topology hints for.
If these conditions are met, Topology Manager will align the requested resources.
If these conditions are met, the Topology Manager will align the requested resources.
In order to customise how this alignment is carried out, the Topology Manager provides two distinct knobs: `scope` and `policy`.
The `scope` defines the granularity at which you would like resource alignment to be performed (e.g. at the `pod` or `container` level). And the `policy` defines the actual strategy used to carry out the alignment (e.g. `best-effort`, `restricted`, `single-numa-node`, etc.).
Details on the various `scopes` and `policies` available today can be found below.
{{< note >}}
To align CPU resources with other requested resources in a Pod Spec, the CPU Manager should be enabled and a proper CPU Manager policy should be configured on a Node. See [control CPU Management Policies](/docs/tasks/administer-cluster/cpu-management-policies/).
{{< /note >}}
### Topology Manager Scopes
The Topology Manager can deal with the alignment of resources in a couple of distinct scopes:
* `container` (default)
* `pod`
Either option can be selected at kubelet startup, with the `--topology-manager-scope` flag.
### container scope
The `container` scope is used by default.
Within this scope, the Topology Manager performs a number of sequential resource alignments: a separate alignment is computed for each container in a pod. In other words, with this scope there is no notion of grouping the containers of a pod onto a common set of NUMA nodes; the Topology Manager aligns individual containers to NUMA nodes independently.
Grouping containers onto a common set of NUMA nodes is deliberately handled by a different scope, the `pod` scope, described next.
### pod scope
To select the `pod` scope, start the kubelet with the command line option `--topology-manager-scope=pod`.
This scope allows for grouping all containers in a pod to a common set of NUMA nodes. That is, the Topology Manager treats a pod as a whole and attempts to allocate the entire pod (all containers) to either a single NUMA node or a common set of NUMA nodes. The following examples illustrate the alignments produced by the Topology Manager on different occasions:
* all containers can be and are allocated to a single NUMA node;
* all containers can be and are allocated to a shared set of NUMA nodes.
The total amount of a particular resource demanded for the entire pod is calculated according to the [effective requests/limits](/docs/concepts/workloads/pods/init-containers/#resources) formula; for each resource, this total is the maximum of (a worked example is sketched below):
* the sum of all app container requests,
* the maximum of the init container requests.
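As an illustrative sketch only (the container names and resource values are hypothetical), a pod with one init container requesting 5 CPUs and two app containers requesting 2 CPUs each has an effective CPU request of max(2 + 2, 5) = 5 CPUs, and that is the demand the `pod` scope attempts to align:
```yaml
# Hypothetical pod, used only to illustrate the effective request calculation.
apiVersion: v1
kind: Pod
metadata:
  name: effective-request-demo
spec:
  initContainers:
  - name: init
    image: busybox
    command: ["sh", "-c", "echo init done"]
    resources:
      requests:
        cpu: "5"        # maximum of init container requests = 5
      limits:
        cpu: "5"
  containers:
  - name: app-1
    image: busybox
    command: ["sh", "-c", "sleep 3600"]
    resources:
      requests:
        cpu: "2"        # sum of app container requests = 2 + 2 = 4
      limits:
        cpu: "2"
  - name: app-2
    image: busybox
    command: ["sh", "-c", "sleep 3600"]
    resources:
      requests:
        cpu: "2"
      limits:
        cpu: "2"
# Effective pod CPU request = max(4, 5) = 5 CPUs; with the pod scope the
# Topology Manager tries to satisfy this demand from a common set of NUMA nodes.
```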
Using the `pod` scope in tandem with `single-numa-node` Topology Manager policy is specifically valuable for workloads that are latency sensitive or for high-throughput applications that perform IPC. By combining both options, you are able to place all containers in a pod onto a single NUMA node; hence, the inter-NUMA communication overhead can be eliminated for that pod.
In the case of the `single-numa-node` policy, a pod is accepted only if a suitable set of NUMA nodes is present among the possible allocations. Reconsidering the example above:
* a set containing only a single NUMA node leads to the pod being admitted,
* whereas a set containing more than one NUMA node results in pod rejection (because two or more NUMA nodes would be required to satisfy the allocation instead of one).
To recap, Topology Manager first computes a set of NUMA nodes and then tests it against Topology Manager policy, which either leads to the rejection or admission of the pod.
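As a minimal configuration sketch, both knobs can be set together in the kubelet configuration file. This assumes the `topologyManagerScope` and `topologyManagerPolicy` fields of the `kubelet.config.k8s.io/v1beta1` `KubeletConfiguration`; the `--topology-manager-scope` and `--topology-manager-policy` command line flags are the equivalent:
```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Treat each pod as a whole when computing NUMA alignment.
topologyManagerScope: pod
# Only admit a pod if its resources fit on a single NUMA node.
topologyManagerPolicy: single-numa-node
```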
### Topology Manager Policies
Topology Manager supports four allocation policies. You can set a policy via a Kubelet flag, `--topology-manager-policy`.
There are four supported policies:
@ -73,6 +119,10 @@ There are four supported policies:
* `restricted`
* `single-numa-node`
{{< note >}}
If the Topology Manager is configured with the **pod** scope, the container that the policy considers reflects the requirements of the entire pod, and thus each container in the pod receives **the same** topology alignment decision.
{{< /note >}}
### none policy {#policy-none}
This is the default policy and does not perform any topology alignment.

@ -336,6 +336,30 @@ and startup Probes. Minimum value is 1.
try `failureThreshold` times before giving up. Giving up in case of liveness probe means restarting the container. In case of readiness probe the Pod will be marked Unready.
Defaults to 3. Minimum value is 1.
{{< note >}}
Before Kubernetes 1.20, the field `timeoutSeconds` was not respected for exec probes:
probes continued running indefinitely, even past their configured deadline,
until a result was returned.
This defect was corrected in Kubernetes v1.20. You may have been relying on the previous behavior,
even without realizing it, as the default timeout is 1 second.
As a cluster administrator, you can disable the [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) `ExecProbeTimeout` (set it to `false`)
on each kubelet to restore the behavior from older versions, then remove that override
once all the exec probes in the cluster have a `timeoutSeconds` value set.
If you have pods that are impacted from the default 1 second timeout,
you should update their probe timeout so that you're ready for the
eventual removal of that feature gate.
With this defect fixed, for exec probes on Kubernetes `1.20+` with the `dockershim` container runtime,
the process inside the container may keep running even after the probe returns a failure because of the timeout.
{{< /note >}}
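For example, a liveness exec probe that sets `timeoutSeconds` explicitly might look like the sketch below (the command and file path are illustrative):
```yaml
livenessProbe:
  exec:
    command:
    - cat
    - /tmp/healthy        # illustrative health file
  initialDelaySeconds: 5
  periodSeconds: 10
  # With the timeout enforced (v1.20+), an exec probe that runs longer than
  # this is treated as a failure instead of running indefinitely.
  timeoutSeconds: 2
```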
{{< caution >}}
Incorrect implementation of readiness probes may result in an ever growing number
of processes in the container, and resource starvation if this is left unchecked.
{{< /caution >}}
### HTTP probes
[HTTP probes](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#httpgetaction-v1-core)
have additional fields that can be set on `httpGet`:
@ -357,6 +381,36 @@ and the Pod's `hostNetwork` field is true. Then `host`, under `httpGet`, should
to 127.0.0.1. If your pod relies on virtual hosts, which is probably the more common
case, you should not use `host`, but rather set the `Host` header in `httpHeaders`.
For an HTTP probe, the kubelet sends two request headers in addition to the mandatory `Host` header:
`User-Agent`, and `Accept`. The default values for these headers are `kube-probe/{{< skew latestVersion >}}`
(where `{{< skew latestVersion >}}` is the version of the kubelet), and `*/*` respectively.
You can override the default headers by defining `.httpHeaders` for the probe; for example
```yaml
livenessProbe:
httpHeaders:
Accept: application/json
startupProbe:
httpHeaders:
User-Agent: MyUserAgent
```
You can also remove these two headers by defining them with an empty value.
```yaml
livenessProbe:
httpHeaders:
Accept: ""
startupProbe:
httpHeaders:
User-Agent: ""
```
### TCP probes
For a TCP probe, the kubelet makes the probe connection at the node, not in the pod, which
means that you can not use a service name in the `host` parameter since the kubelet is unable
to resolve it.
@ -374,7 +428,3 @@ You can also read the API references for:
* [Pod](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core)
* [Container](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core)
* [Probe](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#probe-v1-core)

@ -286,15 +286,16 @@ TODO: Test and explain how to use additional non-K8s secrets with an existing se
## Service Account Token Volume Projection
{{< feature-state for_k8s_version="v1.12" state="beta" >}}
{{< feature-state for_k8s_version="v1.20" state="stable" >}}
{{< note >}}
This ServiceAccountTokenVolumeProjection is __beta__ in 1.12 and
enabled by passing all of the following flags to the API server:
To enable and use token request projection, you must specify each of the following
command line arguments to `kube-apiserver`:
* `--service-account-issuer`
* `--service-account-key-file`
* `--service-account-signing-key-file`
* `--service-account-api-audiences`
* `--api-audiences`
{{< /note >}}
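A minimal sketch of a Pod that mounts a projected service account token follows; the audience, path, and names used here are illustrative:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: token-projection-demo      # illustrative name
spec:
  serviceAccountName: default
  containers:
  - name: app
    image: nginx
    volumeMounts:
    - name: projected-token
      mountPath: /var/run/secrets/tokens
      readOnly: true
  volumes:
  - name: projected-token
    projected:
      sources:
      - serviceAccountToken:
          path: my-token            # file name under the mount path
          audience: my-audience     # illustrative audience
          expirationSeconds: 3600   # the kubelet rotates the token before it expires
```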
@ -324,7 +325,7 @@ The application is responsible for reloading the token when it rotates. Periodic
## Service Account Issuer Discovery
{{< feature-state for_k8s_version="v1.18" state="alpha" >}}
{{< feature-state for_k8s_version="v1.20" state="beta" >}}
The Service Account Issuer Discovery feature is enabled by enabling the
`ServiceAccountIssuerDiscovery` [feature gate](/docs/reference/command-line-tools-reference/feature-gates)
@ -385,5 +386,3 @@ See also:
- [Cluster Admin Guide to Service Accounts](/docs/reference/access-authn-authz/service-accounts-admin/)
- [Service Account Signing Key Retrieval KEP](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/20190730-oidc-discovery.md)
- [OIDC Discovery Spec](https://openid.net/specs/openid-connect-discovery-1_0.html)

@ -149,7 +149,7 @@ exit
## Configure volume permission and ownership change policy for Pods
{{< feature-state for_k8s_version="v1.18" state="alpha" >}}
{{< feature-state for_k8s_version="v1.20" state="beta" >}}
By default, Kubernetes recursively changes ownership and permissions for the contents of each
volume to match the `fsGroup` specified in a Pod's `securityContext` when that volume is

@ -91,18 +91,16 @@ The examples in this section require the `EphemeralContainers` [feature gate](
cluster and `kubectl` version v1.18 or later.
{{< /note >}}
You can use the `kubectl alpha debug` command to add ephemeral containers to a
You can use the `kubectl debug` command to add ephemeral containers to a
running Pod. First, create a pod for the example:
```shell
kubectl run ephemeral-demo --image=k8s.gcr.io/pause:3.1 --restart=Never
```
{{< note >}}
This section use the `pause` container image in examples because it does not
The examples in this section use the `pause` container image because it does not
contain userland debugging utilities, but this method works with all container
images.
{{< /note >}}
If you attempt to use `kubectl exec` to create a shell you will see an error
because there is no shell in this container image.
@ -115,12 +113,12 @@ kubectl exec -it ephemeral-demo -- sh
OCI runtime exec failed: exec failed: container_linux.go:346: starting container process caused "exec: \"sh\": executable file not found in $PATH": unknown
```
You can instead add a debugging container using `kubectl alpha debug`. If you
You can instead add a debugging container using `kubectl debug`. If you
specify the `-i`/`--interactive` argument, `kubectl` will automatically attach
to the console of the Ephemeral Container.
```shell
kubectl alpha debug -it ephemeral-demo --image=busybox --target=ephemeral-demo
kubectl debug -it ephemeral-demo --image=busybox --target=ephemeral-demo
```
```
@ -172,20 +170,171 @@ Use `kubectl delete` to remove the Pod when you're finished:
kubectl delete pod ephemeral-demo
```
<!--
Planned future sections include:
## Debugging using a copy of the Pod
* Debugging with a copy of the pod
Sometimes Pod configuration options make it difficult to troubleshoot in certain
situations. For example, you can't run `kubectl exec` to troubleshoot your
container if your container image does not include a shell or if your application
crashes on startup. In these situations you can use `kubectl debug` to create a
copy of the Pod with configuration values changed to aid debugging.
See https://git.k8s.io/enhancements/keps/sig-cli/20190805-kubectl-debug.md
-->
### Copying a Pod while adding a new container
Adding a new container can be useful when your application is running but not
behaving as you expect and you'd like to add additional troubleshooting
utilities to the Pod.
For example, maybe your application's container images are built on `busybox`
but you need debugging utilities not included in `busybox`. You can simulate
this scenario using `kubectl run`:
```shell
kubectl run myapp --image=busybox --restart=Never -- sleep 1d
```
Run this command to create a copy of `myapp` named `myapp-copy` that adds a
new Ubuntu container for debugging:
```shell
kubectl debug myapp -it --image=ubuntu --share-processes --copy-to=myapp-debug
```
```
Defaulting debug container name to debugger-w7xmf.
If you don't see a command prompt, try pressing enter.
root@myapp-debug:/#
```
{{< note >}}
* `kubectl debug` automatically generates a container name if you don't choose
one using the `--container` flag.
* The `-i` flag causes `kubectl debug` to attach to the new container by
default. You can prevent this by specifying `--attach=false`. If your session
becomes disconnected you can reattach using `kubectl attach`.
* The `--share-processes` flag allows the containers in this Pod to see processes
from the other containers in the Pod. For more information about how this
works, see [Share Process Namespace between Containers in a Pod](
/docs/tasks/configure-pod-container/share-process-namespace/).
{{< /note >}}
Don't forget to clean up the debugging Pod when you're finished with it:
```shell
kubectl delete pod myapp myapp-debug
```
### Copying a Pod while changing its command
Sometimes it's useful to change the command for a container, for example to
add a debugging flag or because the application is crashing.
To simulate a crashing application, use `kubectl run` to create a container
that immediately exits:
```
kubectl run --image=busybox myapp -- false
```
You can see using `kubectl describe pod myapp` that this container is crashing:
```
Containers:
myapp:
Image: busybox
...
Args:
false
State: Waiting
Reason: CrashLoopBackOff
Last State: Terminated
Reason: Error
Exit Code: 1
```
You can use `kubectl debug` to create a copy of this Pod with the command
changed to an interactive shell:
```
kubectl debug myapp -it --copy-to=myapp-debug --container=myapp -- sh
```
```
If you don't see a command prompt, try pressing enter.
/ #
```
Now you have an interactive shell that you can use to perform tasks like
checking filesystem paths or running the container command manually.
{{< note >}}
* To change the command of a specific container you must
specify its name using `--container` or `kubectl debug` will instead
create a new container to run the command you specified.
* The `-i` flag causes `kubectl debug` to attach to the container by default.
You can prevent this by specifying `--attach=false`. If your session becomes
disconnected you can reattach using `kubectl attach`.
{{< /note >}}
Don't forget to clean up the debugging Pod when you're finished with it:
```shell
kubectl delete pod myapp myapp-debug
```
### Copying a Pod while changing container images
In some situations you may want to change a misbehaving Pod from its normal
production container images to an image containing a debugging build or
additional utilities.
As an example, create a Pod using `kubectl run`:
```
kubectl run myapp --image=busybox --restart=Never -- sleep 1d
```
Now use `kubectl debug` to make a copy and change its container image
to `ubuntu`:
```
kubectl debug myapp --copy-to=myapp-debug --set-image=*=ubuntu
```
The syntax of `--set-image` uses the same `container_name=image` syntax as
`kubectl set image`. `*=ubuntu` means change the image of all containers
to `ubuntu`.
Don't forget to clean up the debugging Pod when you're finished with it:
```shell
kubectl delete pod myapp myapp-debug
```
## Debugging via a shell on the node {#node-shell-session}
If none of these approaches work, you can find the host machine that the pod is
running on and SSH into that host, but this should generally not be necessary
given tools in the Kubernetes API. Therefore, if you find yourself needing to
ssh into a machine, please file a feature request on GitHub describing your use
case and why these tools are insufficient.
If none of these approaches work, you can find the Node on which the Pod is
running and create a privileged Pod running in the host namespaces. To create
an interactive shell on a node using `kubectl debug`, run:
```shell
kubectl debug node/mynode -it --image=ubuntu
```
```
Creating debugging pod node-debugger-mynode-pdx84 with container debugger on node mynode.
If you don't see a command prompt, try pressing enter.
root@ek8s:/#
```
When creating a debugging session on a node, keep in mind that:
* `kubectl debug` automatically generates the name of the new Pod based on
the name of the Node.
* The container runs in the host IPC, Network, and PID namespaces.
* The root filesystem of the Node will be mounted at `/host`.
Don't forget to clean up the debugging Pod when you're finished with it:
```shell
kubectl delete pod node-debugger-mynode-pdx84
```

@ -361,7 +361,7 @@ to clients, `kubectl` also checks for unknown fields and rejects those objects w
#### Controlling pruning
By default, all unspecified fields for a custom resource, across all versions, are pruned. It is possible though to opt-out of that for specifc sub-trees of fields by adding `x-kubernetes-preserve-unknown-fields: true` in the [structural OpenAPI v3 validation schema](#specifying-a-structural-schema).
By default, all unspecified fields for a custom resource, across all versions, are pruned. It is possible though to opt out of that for specific sub-trees of fields by adding `x-kubernetes-preserve-unknown-fields: true` in the [structural OpenAPI v3 validation schema](#specifying-a-structural-schema).
For example:
```yaml
@ -563,7 +563,7 @@ Additionally, the following restrictions are applied to the schema:
- The field `additionalProperties` is mutually exclusive with `properties`.
The `default` field can be set when the [Defaulting feature](#defaulting) is enabled,
which is the case with `apiextensions.k8s.io/v1` CustomResourceDefinitions.
which is the case with `apiextensions.k8s.io/v1` CustomResourceDefinitions.
Defaulting is in GA since 1.17 (beta since 1.16 with the `CustomResourceDefaulting`
[feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
enabled, which is the case automatically for many clusters for beta features).
@ -761,6 +761,48 @@ Default values must be pruned (with the exception of defaults for `metadata` fie
Default values for `metadata` fields of `x-kubernetes-embedded-resources: true` nodes (or parts of a default value covering `metadata`) are not pruned during CustomResourceDefinition creation, but through the pruning step during handling of requests.
#### Defaulting and Nullable
**New in 1.20:** null values for fields that either don't specify the nullable flag, or give it a `false` value, will be pruned before defaulting happens. If a default is present, it will be applied. When nullable is `true`, null values will be conserved and won't be defaulted.
For example, given the OpenAPI schema below:
```yaml
type: object
properties:
spec:
type: object
properties:
foo:
type: string
nullable: false
default: "default"
bar:
type: string
nullable: true
baz:
type: string
```
creating an object with null values for `foo`, `bar`, and `baz`
```yaml
spec:
foo: null
bar: null
baz: null
```
leads to
```yaml
spec:
foo: "default"
bar: null
```
with `foo` pruned and defaulted because the field is non-nullable, `bar` maintaining the null value due to `nullable: true`, and `baz` pruned because the field is non-nullable and has no default.
### Publish Validation Schema in OpenAPI v2
CustomResourceDefinition [OpenAPI v3 validation schemas](#validation) which are [structural](#specifying-a-structural-schema) and [enable pruning](#field-pruning) are published as part of the [OpenAPI v2 spec](/docs/concepts/overview/kubernetes-api/#openapi-and-swagger-definitions) from Kubernetes API server.

@ -202,16 +202,18 @@ variables and `downwardAPI` volumes:
* Information available via `fieldRef`:
* `metadata.name` - the pod's name
* `metadata.namespace` - the pod's namespace
* `metadata.uid` - the pod's UID, available since v1.8.0-alpha.2
* `metadata.labels['<KEY>']` - the value of the pod's label `<KEY>` (for example, `metadata.labels['mylabel']`); available in Kubernetes 1.9+
* `metadata.annotations['<KEY>']` - the value of the pod's annotation `<KEY>` (for example, `metadata.annotations['myannotation']`); available in Kubernetes 1.9+
* `metadata.uid` - the pod's UID
* `metadata.labels['<KEY>']` - the value of the pod's label `<KEY>` (for example, `metadata.labels['mylabel']`)
* `metadata.annotations['<KEY>']` - the value of the pod's annotation `<KEY>` (for example, `metadata.annotations['myannotation']`)
* Information available via `resourceFieldRef`:
* A Container's CPU limit
* A Container's CPU request
* A Container's memory limit
* A Container's memory request
* A Container's ephemeral-storage limit, available since v1.8.0-beta.0
* A Container's ephemeral-storage request, available since v1.8.0-beta.0
* A Container's hugepages limit (providing that the `DownwardAPIHugePages` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled)
* A Container's hugepages request (providing that the `DownwardAPIHugePages` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled)
* A Container's ephemeral-storage limit
* A Container's ephemeral-storage request
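For instance, `resourceFieldRef` entries such as the ones above can be exposed as container environment variables; the sketch below uses illustrative names, and the hugepages entry assumes the `DownwardAPIHugePages` feature gate is enabled:
```yaml
containers:
- name: app                          # illustrative container name
  image: busybox
  command: ["sh", "-c", "env && sleep 3600"]
  resources:
    limits:
      memory: 256Mi
      hugepages-2Mi: 128Mi           # requires hugepages to be available on the node
  env:
  - name: MEMORY_LIMIT
    valueFrom:
      resourceFieldRef:
        containerName: app
        resource: limits.memory
  - name: HUGEPAGES_2MI_LIMIT
    valueFrom:
      resourceFieldRef:
        containerName: app
        resource: limits.hugepages-2Mi   # needs the DownwardAPIHugePages feature gate
```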
In addition, the following information is available through
`downwardAPI` volume `fieldRef`:

@ -0,0 +1,141 @@
---
title: Configure a kubelet image credential provider
reviewers:
- liggitt
- cheftako
description: Configure the kubelet's image credential provider plugin
content_type: task
---
{{< feature-state for_k8s_version="v1.20" state="alpha" >}}
<!-- overview -->
Starting from Kubernetes v1.20, the kubelet can dynamically retrieve credentials for a container image registry
using exec plugins. The kubelet and the exec plugin communicate through stdio (stdin, stdout, and stderr) using
Kubernetes versioned APIs. These plugins allow the kubelet to request credentials for a container registry dynamically
as opposed to storing static credentials on disk. For example, the plugin may talk to a local metadata server to retrieve
short-lived credentials for an image that is being pulled by the kubelet.
You may be interested in using this capability if any of the below are true:
* API calls to a cloud provider service are required to retrieve authentication information for a registry.
* Credentials have short expiration times and requesting new credentials frequently is required.
* Storing registry credentials on disk or in imagePullSecrets is not acceptable.
This guide demonstrates how to configure the kubelet's image credential provider plugin mechanism.
## {{% heading "prerequisites" %}}
* The kubelet image credential provider is introduced in v1.20 as an alpha feature. As with other alpha features,
the `KubeletCredentialProviders` feature gate must be enabled (on the kubelet only) for the feature to work.
* A working implementation of a credential provider exec plugin. You can build your own plugin or use one provided by cloud providers.
<!-- steps -->
## Installing Plugins on Nodes
A credential provider plugin is an executable binary that will be run by the kubelet. Ensure that the plugin binary exists on
every node in your cluster and is stored in a known directory. The directory will be required later when configuring kubelet flags.
## Configuring the Kubelet
In order to use this feature, the kubelet expects two flags to be set:
* `--image-credential-provider-config` - the path to the credential provider plugin config file.
* `--image-credential-provider-bin-dir` - the path to the directory where credential provider plugin binaries are located.
### Configure a kubelet credential provider
The configuration file passed into `--image-credential-provider-config` is read by the kubelet to determine which exec plugins
should be invoked for which container images. Here's an example configuration file you may end up using if you are using the [ECR](https://aws.amazon.com/ecr/)-based plugin:
```yaml
kind: CredentialProviderConfig
apiVersion: kubelet.config.k8s.io/v1alpha1
# providers is a list of credential provider plugins that will be enabled by the kubelet.
# Multiple providers may match against a single image, in which case credentials
# from all providers will be returned to the kubelet. If multiple providers are called
# for a single image, the results are combined. If providers return overlapping
# auth keys, the value from the provider earlier in this list is used.
providers:
# name is the required name of the credential provider. It must match the name of the
# provider executable as seen by the kubelet. The executable must be in the kubelet's
# bin directory (set by the --image-credential-provider-bin-dir flag).
- name: ecr
# matchImages is a required list of strings used to match against images in order to
# determine if this provider should be invoked. If one of the strings matches the
# requested image from the kubelet, the plugin will be invoked and given a chance
# to provide credentials. Images are expected to contain the registry domain
# and URL path.
#
# Each entry in matchImages is a pattern which can optionally contain a port and a path.
# Globs can be used in the domain, but not in the port or the path. Globs are supported
# as subdomains like '*.k8s.io' or 'k8s.*.io', and top-level-domains such as 'k8s.*'.
# Matching partial subdomains like 'app*.k8s.io' is also supported. Each glob can only match
# a single subdomain segment, so *.io does not match *.k8s.io.
#
# A match exists between an image and a matchImage when all of the below are true:
# - Both contain the same number of domain parts and each part matches.
# - The URL path of an imageMatch must be a prefix of the target image URL path.
# - If the imageMatch contains a port, then the port must match in the image as well.
#
# Example values of matchImages:
# - 123456789.dkr.ecr.us-east-1.amazonaws.com
# - *.azurecr.io
# - gcr.io
# - *.*.registry.io
# - registry.io:8080/path
matchImages:
- "*.dkr.ecr.*.amazonaws.com"
- "*.dkr.ecr.*.amazonaws.cn"
- "*.dkr.ecr-fips.*.amazonaws.com"
- "*.dkr.ecr.us-iso-east-1.c2s.ic.gov"
- "*.dkr.ecr.us-isob-east-1.sc2s.sgov.gov"
# defaultCacheDuration is the default duration the plugin will cache credentials in-memory
# if a cache duration is not provided in the plugin response. This field is required.
defaultCacheDuration: "12h"
# Required input version of the exec CredentialProviderRequest. The returned CredentialProviderResponse
# MUST use the same encoding version as the input. Current supported values are:
# - credentialprovider.kubelet.k8s.io/v1alpha1
apiVersion: credentialprovider.kubelet.k8s.io/v1alpha1
# Arguments to pass to the command when executing it.
# +optional
args:
- get-credentials
# Env defines additional environment variables to expose to the process. These
# are unioned with the host's environment, as well as variables client-go uses
# to pass argument to the plugin.
# +optional
env:
- name: AWS_PROFILE
value: example_profile
```
The `providers` field is a list of enabled plugins used by the kubelet. Each entry has a few required fields:
* `name`: the name of the plugin which MUST match the name of the executable binary that exists in the directory passed into `--image-credential-provider-bin-dir`.
* `matchImages`: a list of strings used to match against images in order to determine if this provider should be invoked. More on this below.
* `defaultCacheDuration`: the default duration the kubelet will cache credentials in-memory if a cache duration was not specified by the plugin.
* `apiVersion`: the api version that the kubelet and the exec plugin will use when communicating.
Each credential provider can also be given optional args and environment variables. Consult the plugin implementers to determine what set of arguments and environment variables are required for a given plugin.
#### Configure image matching
The `matchImages` field for each credential provider is used by the kubelet to determine whether a plugin should be invoked
for a given image that a Pod is using. Each entry in `matchImages` is an image pattern which can optionally contain a port and a path.
Globs can be used in the domain, but not in the port or the path. Globs are supported as subdomains like `*.k8s.io` or `k8s.*.io`,
and top-level domains such as `k8s.*`. Matching partial subdomains like `app*.k8s.io` is also supported. Each glob can only match
a single subdomain segment, so `*.io` does NOT match `*.k8s.io`.
A match exists between an image name and a `matchImage` entry when all of the below are true:
* Both contain the same number of domain parts and each part matches.
* The URL path of the match image must be a prefix of the target image URL path.
* If the imageMatch contains a port, then the port must match in the image as well.
Some example values of `matchImages` patterns are:
* `123456789.dkr.ecr.us-east-1.amazonaws.com`
* `*.azurecr.io`
* `gcr.io`
* `*.*.registry.io`
* `foo.registry.io:8080/path`

@ -2,7 +2,8 @@
reviewers:
- lachie83
- khenidak
min-kubernetes-server-version: v1.16
- bridgetkromhout
min-kubernetes-server-version: v1.20
title: Validate IPv4/IPv6 dual-stack
content_type: task
---
@ -97,31 +98,31 @@ a00:100::4 pod01
## Validate Services
Create the following Service without the `ipFamily` field set. When this field is not set, the Service gets an IP from the first configured range via `--service-cluster-ip-range` flag on the kube-controller-manager.
Create the following Service that does not explicitly define `.spec.ipFamilyPolicy`. Kubernetes will assign a cluster IP for the Service from the first configured `service-cluster-ip-range` and set the `.spec.ipFamilyPolicy` to `SingleStack`.
{{< codenew file="service/networking/dual-stack-default-svc.yaml" >}}
By viewing the YAML for the Service you can observe that the Service has the `ipFamily` field has set to reflect the address family of the first configured range set via `--service-cluster-ip-range` flag on kube-controller-manager.
Use `kubectl` to view the YAML for the Service.
```shell
kubectl get svc my-service -o yaml
```
The Service has `.spec.ipFamilyPolicy` set to `SingleStack` and `.spec.clusterIP` set to an IPv4 address from the first configured range set via `--service-cluster-ip-range` flag on kube-controller-manager.
```yaml
apiVersion: v1
kind: Service
metadata:
creationTimestamp: "2019-09-03T20:45:13Z"
labels:
app: MyApp
name: my-service
namespace: default
resourceVersion: "485836"
selfLink: /api/v1/namespaces/default/services/my-service
uid: b6fa83ef-fe7e-47a3-96a1-ac212fa5b030
spec:
clusterIP: 10.0.29.179
ipFamily: IPv4
clusterIP: 10.0.217.164
clusterIPs:
- 10.0.217.164
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- port: 80
protocol: TCP
@ -134,28 +135,100 @@ status:
loadBalancer: {}
```
Create the following Service with the `ipFamily` field set to `IPv6`.
Create the following Service that explicitly defines `IPv6` as the first array element in `.spec.ipFamilies`. Kubernetes will assign a cluster IP for the Service from the IPv6 range configured `service-cluster-ip-range` and set the `.spec.ipFamilyPolicy` to `SingleStack`.
{{< codenew file="service/networking/dual-stack-ipv6-svc.yaml" >}}
{{< codenew file="service/networking/dual-stack-ipfamilies-ipv6.yaml" >}}
Validate that the Service gets a cluster IP address from the IPv6 address block. You may then validate access to the service via the IP and port.
Use `kubectl` to view the YAML for the Service.
```shell
kubectl get svc my-service -o yaml
```
kubectl get svc -l app=MyApp
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
my-service ClusterIP fe80:20d::d06b <none> 80/TCP 9s
The Service has `.spec.ipFamilyPolicy` set to `SingleStack` and `.spec.clusterIP` set to an IPv6 address from the IPv6 range set via `--service-cluster-ip-range` flag on kube-controller-manager.
```yaml
apiVersion: v1
kind: Service
metadata:
labels:
app: MyApp
name: my-service
spec:
clusterIP: fd00::5118
clusterIPs:
- fd00::5118
ipFamilies:
- IPv6
ipFamilyPolicy: SingleStack
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: MyApp
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
```
Create the following Service that explicitly defines `PreferDualStack` in `.spec.ipFamilyPolicy`. Kubernetes will assign both IPv4 and IPv6 addresses (as this cluster has dual-stack enabled) and select the `.spec.clusterIP` from the list of `.spec.clusterIPs` based on the address family of the first element in the `.spec.ipFamilies` array.
{{< codenew file="service/networking/dual-stack-preferred-svc.yaml" >}}
{{< note >}}
The `kubectl get svc` command will only show the primary IP in the `CLUSTER-IP` field.
```shell
kubectl get svc -l app=MyApp
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
my-service ClusterIP 10.0.216.242 <none> 80/TCP 5s
```
{{< /note >}}
Validate that the Service gets cluster IPs from the IPv4 and IPv6 address blocks using `kubectl describe`. You may then validate access to the service via the IPs and ports.
```shell
kubectl describe svc -l app=MyApp
```
```
Name: my-service
Namespace: default
Labels: app=MyApp
Annotations: <none>
Selector: app=MyApp
Type: ClusterIP
IP Family Policy: PreferDualStack
IP Families: IPv4,IPv6
IP: 10.0.216.242
IPs: 10.0.216.242,fd00::af55
Port: <unset> 80/TCP
TargetPort: 9376/TCP
Endpoints: <none>
Session Affinity: None
Events: <none>
```
### Create a dual-stack load balanced Service
If the cloud provider supports the provisioning of IPv6 enabled external load balancer, create the following Service with both the `ipFamily` field set to `IPv6` and the `type` field set to `LoadBalancer`
If the cloud provider supports the provisioning of IPv6 enabled external load balancers, create the following Service with `PreferDualStack` in `.spec.ipFamilyPolicy`, `IPv6` as the first element of the `.spec.ipFamilies` array and the `type` field set to `LoadBalancer`.
{{< codenew file="service/networking/dual-stack-ipv6-lb-svc.yaml" >}}
{{< codenew file="service/networking/dual-stack-prefer-ipv6-lb-svc.yaml" >}}
Check the Service:
```shell
kubectl get svc -l app=MyApp
```
Validate that the Service receives a `CLUSTER-IP` address from the IPv6 address block along with an `EXTERNAL-IP`. You may then validate access to the service via the IP and port.
```
kubectl get svc -l app=MyApp
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
my-service ClusterIP fe80:20d::d06b 2001:db8:f100:4002::9d37:c0d7 80:31868/TCP 30s
```shell
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
my-service LoadBalancer fd00::7ebc 2603:1030:805::5 80:30790/TCP 35s
```

@ -234,6 +234,75 @@ the delay value is set too short, the scale of the replicas set may keep thrashi
usual.
{{< /note >}}
## Support for resource metrics
Any HPA target can be scaled based on the resource usage of the pods in the scaling target.
When defining the pod specification, the resource requests like `cpu` and `memory` should
be specified. These are used to determine the resource utilization and are used by the HPA controller
to scale the target up or down. To use resource utilization based scaling, specify a metric source
like this:
```yaml
type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 60
```
With this metric the HPA controller will keep the average utilization of the pods in the scaling
target at 60%. Utilization is the ratio between the current usage of a resource and the requested
resources of the pod. See [Algorithm](#algorithm-details) for more details about how the utilization
is calculated and averaged.
{{< note >}}
Since the resource usages of all the containers are summed up, the total pod utilization may not
accurately represent the individual container resource usage. This could lead to situations where
a single container is running with high usage but the HPA does not scale out because the overall
pod usage is still within acceptable limits.
{{< /note >}}
### Container Resource Metrics
{{< feature-state for_k8s_version="v1.20" state="alpha" >}}
`HorizontalPodAutoscaler` also supports a container metric source where the HPA can track the
resource usage of individual containers across a set of Pods, in order to scale the target resource.
This lets you configure scaling thresholds for the containers that matter most in a particular Pod.
For example, if you have a web application and a logging sidecar, you can scale based on the resource
use of the web application, ignoring the sidecar container and its resource use.
If you revise the target resource to have a new Pod specification with a different set of containers,
you should revise the HPA spec if that newly added container should also be used for
scaling. If the specified container in the metric source is not present or only present in a subset
of the pods then those pods are ignored and the recommendation is recalculated. See [Algorithm](#algorithm-details)
for more details about the calculation. To use container resources for autoscaling define a metric
source as follows:
```yaml
type: ContainerResource
containerResource:
name: cpu
container: application
target:
type: Utilization
averageUtilization: 60
```
In the above example, the HPA controller scales the target such that the average utilization of the cpu
in the `application` container of all the pods is 60%.
{{< note >}}
If you change the name of a container that a HorizontalPodAutoscaler is tracking, you can
make that change in a specific order to ensure scaling remains available and effective
whilst the change is being applied. Before you update the resource that defines the container
(such as a Deployment), you should update the associated HPA to track both the new and
old container names. This way, the HPA is able to calculate a scaling recommendation
throughout the update process.
Once you have rolled out the container name change to the workload resource, tidy up by removing
the old container name from the HPA specification.
{{< /note >}}
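For reference, a minimal sketch of a complete HorizontalPodAutoscaler object using this container metric source (the Deployment name `web-app` and the container name `application` are illustrative):
```yaml
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: web-app                    # illustrative name
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: web-app                  # the workload being scaled
  minReplicas: 2
  maxReplicas: 10
  metrics:
  - type: ContainerResource
    containerResource:
      name: cpu
      container: application       # only this container's usage drives scaling
      target:
        type: Utilization
        averageUtilization: 60
```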
## Support for multiple metrics
Kubernetes 1.6 adds support for scaling based on multiple metrics. You can use the `autoscaling/v2beta2` API

@ -1,4 +1,4 @@
apiVersion: flowcontrol.apiserver.k8s.io/v1alpha1
apiVersion: flowcontrol.apiserver.k8s.io/v1beta1
kind: FlowSchema
metadata:
name: health-for-strangers

@ -2,10 +2,11 @@ apiVersion: v1
kind: Service
metadata:
name: my-service
labels:
app: MyApp
spec:
selector:
app: MyApp
ports:
- protocol: TCP
port: 80
targetPort: 9376

@ -2,11 +2,13 @@ apiVersion: v1
kind: Service
metadata:
name: my-service
labels:
app: MyApp
spec:
ipFamily: IPv4
ipFamilies:
- IPv6
selector:
app: MyApp
ports:
- protocol: TCP
port: 80
targetPort: 9376

@ -5,11 +5,12 @@ metadata:
labels:
app: MyApp
spec:
ipFamily: IPv6
ipFamilyPolicy: PreferDualStack
ipFamilies:
- IPv6
type: LoadBalancer
selector:
app: MyApp
ports:
- protocol: TCP
port: 80
targetPort: 9376

@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: my-service
labels:
app: MyApp
spec:
ipFamilyPolicy: PreferDualStack
ipFamilies:
- IPv6
- IPv4
selector:
app: MyApp
ports:
- protocol: TCP
port: 80

@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: my-service
labels:
app: MyApp
spec:
ipFamilyPolicy: PreferDualStack
selector:
app: MyApp
ports:
- protocol: TCP
port: 80

@ -43,10 +43,22 @@
{{- $oldestMinorVersion -}}
{{- end -}}
<!-- output latestVersionAddMinor based on captured args -->
{{- if eq $version "latestVersionAddMinor" -}}
{{- $seperator := .Get 2 -}}
{{- if eq $seperator "" -}}
{{- $seperator = "." -}}
{{- end -}}
{{- $latestVersionAddMinor := int (.Get 1) -}}
{{- $latestVersionAddMinor = add $minorVersion $latestVersionAddMinor -}}
{{- $latestVersionAddMinor = printf "%s%s%d" (index $versionArray 0) $seperator $latestVersionAddMinor -}}
{{- $latestVersionAddMinor -}}
{{- end -}}
<!--
example shortcode use:
- skew nextMinorVersion
- skew latestVersion
- skew prevMinorVersion
- skew oldestMinorVersion
- skew latestVersionAddMinor -1 "-"
-->
